return nil
}
start := (1 << 14) * piece
- c.logger.WithDefaultLevel(log.Debug).Printf("sending metadata piece %d", piece)
+ c.protocolLogger.WithDefaultLevel(log.Debug).Printf("sending metadata piece %d", piece)
c.write(t.newMetadataExtensionMessage(c, pp.DataMetadataExtensionMsgType, piece, t.metadataBytes[start:start+t.metadataPieceSize(piece)]))
return nil
case pp.RejectMetadataExtensionMsgType:
}
}
c.peerImpl = c
- c.logger = cl.logger.WithDefaultLevel(log.Warning)
- c.logger = c.logger.WithContextText(fmt.Sprintf("%T %p", c, c)).WithNames(protocolLoggingName)
+ c.logger = cl.logger.WithDefaultLevel(log.Warning).WithContextText(fmt.Sprintf("%T %p", c, c))
+ c.protocolLogger = c.logger.WithNames(protocolLoggingName)
c.setRW(connStatsReadWriter{nc, c})
c.r = &rateLimitedReader{
l: cl.config.DownloadRateLimiter,
type PeerConn struct {
Peer
+ // Logger dedicated to wire-protocol events on this connection.
+ protocolLogger log.Logger
+
// BEP 52
v2 bool
if index < len(cn.metadataRequests) && cn.metadataRequests[index] {
return
}
- cn.logger.WithDefaultLevel(log.Debug).Printf("requesting metadata piece %d", index)
+ cn.protocolLogger.WithDefaultLevel(log.Debug).Printf("requesting metadata piece %d", index)
cn.write(pp.MetadataExtensionRequestMsg(eID, index))
for index >= len(cn.metadataRequests) {
cn.metadataRequests = append(cn.metadataRequests, false)
}
if opt := c.maximumPeerRequestChunkLength(); opt.Ok && int(r.Length) > opt.Value {
err := fmt.Errorf("peer requested chunk too long (%v)", r.Length)
- c.logger.Levelf(log.Warning, err.Error())
+ c.protocolLogger.Levelf(log.Warning, err.Error())
if c.fastEnabled() {
c.reject(r)
return nil
// If fast isn't enabled, I think we would have wiped all peer requests when we last
// choked, and requests while we're choking would be ignored. It could be possible that
// a peer request data read completed concurrently to it being deleted elsewhere.
- c.logger.WithDefaultLevel(log.Warning).Printf("already choking peer, requests might not be rejected correctly")
+ c.protocolLogger.WithDefaultLevel(log.Warning).Printf("already choking peer, requests might not be rejected correctly")
}
// Choking a non-fast peer should cause them to flush all their requests.
c.choke(c.write)
}
func (c *PeerConn) logProtocolBehaviour(level log.Level, format string, arg ...interface{}) {
- c.logger.WithContextText(fmt.Sprintf(
+ c.protocolLogger.WithContextText(fmt.Sprintf(
"peer id %q, ext v %q", c.PeerID, c.PeerClientName.Load(),
)).SkipCallers(1).Levelf(level, format, arg...)
}
if preservedCount != 0 {
// TODO: Yes this is a debug log but I'm not happy with the state of the logging lib
// right now.
- c.logger.Levelf(log.Debug,
+ c.protocolLogger.Levelf(log.Debug,
"%v requests were preserved while being choked (fast=%v)",
preservedCount,
c.fastEnabled())
req := newRequestFromMessage(&msg)
if !c.remoteRejectedRequest(c.t.requestIndexFromRequest(req)) {
err = fmt.Errorf("received invalid reject for request %v", req)
- c.logger.Levelf(log.Debug, "%v", err)
+ c.protocolLogger.Levelf(log.Debug, "%v", err)
}
case pp.AllowedFast:
torrent.Add("allowed fasts received", 1)
if id == pp.HandshakeExtendedID {
var d pp.ExtendedHandshakeMessage
if err := bencode.Unmarshal(payload, &d); err != nil {
- c.logger.Printf("error parsing extended handshake message %q: %s", payload, err)
+ c.protocolLogger.Printf("error parsing extended handshake message %q: %s", payload, err)
return fmt.Errorf("unmarshalling extended handshake payload: %w", err)
}
// Trigger this callback after it's been processed. If you want to handle it yourself, you
panic(length)
}
if length%2 != 0 {
- pc.logger.Levelf(log.Warning, "requesting odd hashes length %d", length)
+ pc.protocolLogger.Levelf(log.Warning, "requesting odd hashes length %d", length)
}
msg := pp.Message{
Type: pp.HashRequest,
metainfo.HashForPiecePad(int64(pc.t.usualPieceSize())))
expectedPiecesRoot := file.piecesRoot.Unwrap()
if root == expectedPiecesRoot {
- pc.logger.WithNames(v2HashesLogName).Levelf(
+ pc.protocolLogger.WithNames(v2HashesLogName).Levelf(
log.Info,
"got piece hashes for file %v (num pieces %v)",
file, file.numPieces())
pc.t.piece(torrentPieceIndex).hashV2.Set(peerHash)
}
} else {
- pc.logger.WithNames(v2HashesLogName).Levelf(
+ pc.protocolLogger.WithNames(v2HashesLogName).Levelf(
log.Debug,
"peer file piece hashes root mismatch: %x != %x",
root, expectedPiecesRoot)
readyfn func()
torrent *Torrent
Listed bool
- info log.Logger
- dbg log.Logger
+ logger log.Logger
// Running record of live connections the remote end of the connection purports to have.
remoteLiveConns map[netip.AddrPort]g.Option[pp.PexPeerFlags]
lastRecv time.Time
s.xid = xid
s.last = nil
s.torrent = c.t
- s.info = c.t.cl.logger.WithDefaultLevel(log.Info)
- s.dbg = c.logger.WithDefaultLevel(log.Debug)
+ s.logger = c.logger.WithDefaultLevel(log.Debug).WithNames("pex")
s.readyfn = c.tickleWriter
s.gate = make(chan struct{}, 1)
s.timer = time.AfterFunc(0, func() {
select {
case <-s.gate:
if tx := s.genmsg(); tx != nil {
- s.dbg.Print("sending PEX message: ", tx)
+ s.logger.Print("sending PEX message: ", tx)
flow := postfn(tx.Message(s.xid))
s.sched(pexInterval)
return flow
if err != nil {
return fmt.Errorf("unmarshalling pex message: %w", err)
}
- s.dbg.Printf("received pex message: %v", rx)
+ s.logger.Printf("received pex message: %v", rx)
torrent.Add("pex added peers received", int64(len(rx.Added)))
torrent.Add("pex added6 peers received", int64(len(rx.Added6)))
peers.AppendFromPex(rx.Added6, rx.Added6Flags)
peers.AppendFromPex(rx.Added, rx.AddedFlags)
if time.Now().Before(s.torrent.pex.rest) {
- s.dbg.Printf("in cooldown period, incoming PEX discarded")
+ s.logger.Printf("in cooldown period, incoming PEX discarded")
return nil
}
added := s.torrent.addPeers(peers)
- s.dbg.Printf("got %v peers over pex, added %v", len(peers), added)
+ s.logger.Printf("got %v peers over pex, added %v", len(peers), added)
if len(peers) > 0 {
s.torrent.pex.rest = time.Now().Add(pexInterval)