From 08c403ea52153a22d6f599e03741fbfce4a0e492 Mon Sep 17 00:00:00 2001
From: Matt Joiner
Date: Sun, 28 May 2023 12:04:49 +1000
Subject: [PATCH] Fix logging reported in #841

---
 internal/alloclim/r.go |  6 +++++-
 peerconn.go            | 28 ++++++++++++----------------
 testing.go             |  3 +++
 torrent.go             |  2 +-
 4 files changed, 21 insertions(+), 18 deletions(-)

diff --git a/internal/alloclim/r.go b/internal/alloclim/r.go
index b84be667..71a4dd79 100644
--- a/internal/alloclim/r.go
+++ b/internal/alloclim/r.go
@@ -7,6 +7,7 @@ import (
 	"sync"
 
 	"github.com/anacrolix/chansync"
+	"github.com/anacrolix/log"
 )
 
 type Reservation struct {
@@ -75,7 +76,10 @@ func (me *Reservation) wake() bool {
 
 func (me *Reservation) Wait(ctx context.Context) error {
 	if me.n > me.l.Max {
-		return fmt.Errorf("reservation for %v exceeds limiter max %v", me.n, me.l.Max)
+		return log.WithLevel(
+			log.Warning,
+			fmt.Errorf("reservation for %v exceeds limiter max %v", me.n, me.l.Max),
+		)
 	}
 	select {
 	case <-ctx.Done():
diff --git a/peerconn.go b/peerconn.go
index 26a475e4..62f632d5 100644
--- a/peerconn.go
+++ b/peerconn.go
@@ -589,7 +589,16 @@ func (c *PeerConn) onReadRequest(r Request, startFetch bool) error {
 }
 
 func (c *PeerConn) peerRequestDataReader(r Request, prs *peerRequestState) {
-	b, err := c.readPeerRequestData(r, prs)
+	// Should we depend on Torrent closure here? I think it's okay to get cancelled from elsewhere,
+	// or fail to read and then cleanup. Also, we used to hang here if the reservation was never
+	// dropped, that was fixed.
+	ctx := context.Background()
+	err := prs.allocReservation.Wait(ctx)
+	if err != nil {
+		c.logger.WithDefaultLevel(log.Debug).Levelf(log.ErrorLevel(err), "waiting for alloc limit reservation: %v", err)
+		return
+	}
+	b, err := c.readPeerRequestData(r)
 	c.locker().Lock()
 	defer c.locker().Unlock()
 	if err != nil {
@@ -615,7 +624,7 @@ func (c *PeerConn) peerRequestDataReadFailed(err error, r Request) {
 		// https://github.com/anacrolix/torrent/issues/702#issuecomment-1000953313.
 		logLevel = log.Debug
 	}
-	c.logger.WithDefaultLevel(logLevel).Printf("error reading chunk for peer Request %v: %v", r, err)
+	c.logger.Levelf(logLevel, "error reading chunk for peer Request %v: %v", r, err)
 	if c.t.closed.IsSet() {
 		return
 	}
@@ -646,20 +655,7 @@ func (c *PeerConn) peerRequestDataReadFailed(err error, r Request) {
 	}
 }
 
-func (c *PeerConn) readPeerRequestData(r Request, prs *peerRequestState) ([]byte, error) {
-	// Should we depend on Torrent closure here? I think it's okay to get cancelled from elsewhere,
-	// or fail to read and then cleanup.
-	ctx := context.Background()
-	err := prs.allocReservation.Wait(ctx)
-	if err != nil {
-		if ctx.Err() == nil {
-			// The error is from the reservation itself. Something is very broken, or we're not
-			// guarding against excessively large requests.
-			err = log.WithLevel(log.Critical, err)
-		}
-		err = fmt.Errorf("waiting for alloc limit reservation: %w", err)
-		return nil, err
-	}
+func (c *PeerConn) readPeerRequestData(r Request) ([]byte, error) {
 	b := make([]byte, r.Length)
 	p := c.t.info.Piece(int(r.Index))
 	n, err := c.t.readAt(b, p.Offset()+int64(r.Begin))
diff --git a/testing.go b/testing.go
index d3f04b66..6fb54112 100644
--- a/testing.go
+++ b/testing.go
@@ -21,6 +21,9 @@ func TestingConfig(t testing.TB) *ClientConfig {
 	cfg.KeepAliveTimeout = time.Millisecond
 	cfg.MinPeerExtensions.SetBit(pp.ExtensionBitFast, true)
 	cfg.Logger = log.Default.WithContextText(t.Name())
+	// 2 would suffice for the greeting test, but 5 is needed for a few other tests. This should be
+	// something slightly higher than the usual chunk size, so it gets tickled in some tests.
+	cfg.MaxAllocPeerRequestDataPerConn = 5
 	//cfg.Debug = true
 	//cfg.Logger = cfg.Logger.WithText(func(m log.Msg) string {
 	//	t := m.Text()
diff --git a/torrent.go b/torrent.go
index a2b85a4d..6cc2e87f 100644
--- a/torrent.go
+++ b/torrent.go
@@ -2189,7 +2189,7 @@ func (t *Torrent) pieceHashed(piece pieceIndex, passed bool, hashIoErr error) {
 		if len(bannableTouchers) >= 1 {
 			c := bannableTouchers[0]
 			if len(bannableTouchers) != 1 {
-				t.logger.Levelf(log.Warning, "would have banned %v for touching piece %v after failed piece check", c.remoteIp(), piece)
+				t.logger.Levelf(log.Debug, "would have banned %v for touching piece %v after failed piece check", c.remoteIp(), piece)
 			} else {
 				// Turns out it's still useful to ban peers like this because if there's only a
 				// single peer for a piece, and we never progress that piece to completion, we
-- 
2.44.0
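
A note on the logging pattern the patch switches to: anacrolix/log lets a level be attached to an error where the error is created and recovered where it is logged. The sketch below is illustrative only; it uses just the calls visible in the diff (log.WithLevel, log.ErrorLevel, Logger.WithDefaultLevel, Logger.Levelf), and the newOversizeError helper plus the numbers fed to it are made up for the example.

package main

import (
	"fmt"

	"github.com/anacrolix/log"
)

// newOversizeError mirrors Reservation.Wait above: the level travels with the
// error instead of being chosen at every log site. The name is hypothetical.
func newOversizeError(n, max int64) error {
	return log.WithLevel(
		log.Warning,
		fmt.Errorf("reservation for %v exceeds limiter max %v", n, max),
	)
}

func main() {
	// Default to Debug, as peerRequestDataReader does above.
	logger := log.Default.WithDefaultLevel(log.Debug)
	err := newOversizeError(32<<10, 16<<10)
	// log.ErrorLevel reads the level attached by log.WithLevel, so this message
	// goes out at Warning rather than at the logger's Debug default.
	logger.Levelf(log.ErrorLevel(err), "waiting for alloc limit reservation: %v", err)
}

The upshot for this patch is that alloclim decides how severe an oversize reservation is, while peerconn only decides the fallback level for errors that carry no level of their own.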