Drop support for go 1.20
[btrtrc.git] / peerconn.go
index a9d0baad2696e0fc5e0161b2a9dc723bc162dcea..e2d944ff269bb94426772d60f267bb359582d156 100644 (file)
@@ -3,134 +3,35 @@ package torrent
 import (
        "bufio"
        "bytes"
+       "context"
        "errors"
        "fmt"
        "io"
        "math/rand"
        "net"
-       "sort"
+       "net/netip"
        "strconv"
        "strings"
+       "sync/atomic"
        "time"
 
        "github.com/RoaringBitmap/roaring"
+       "github.com/anacrolix/generics"
+       . "github.com/anacrolix/generics"
        "github.com/anacrolix/log"
-       "github.com/anacrolix/missinggo/iter"
        "github.com/anacrolix/missinggo/v2/bitmap"
        "github.com/anacrolix/multiless"
+       "golang.org/x/exp/maps"
+       "golang.org/x/time/rate"
 
-       "github.com/anacrolix/chansync"
        "github.com/anacrolix/torrent/bencode"
+       "github.com/anacrolix/torrent/internal/alloclim"
        "github.com/anacrolix/torrent/metainfo"
        "github.com/anacrolix/torrent/mse"
        pp "github.com/anacrolix/torrent/peer_protocol"
-       request_strategy "github.com/anacrolix/torrent/request-strategy"
+       utHolepunch "github.com/anacrolix/torrent/peer_protocol/ut-holepunch"
 )
 
-type PeerSource string
-
-const (
-       PeerSourceTracker         = "Tr"
-       PeerSourceIncoming        = "I"
-       PeerSourceDhtGetPeers     = "Hg" // Peers we found by searching a DHT.
-       PeerSourceDhtAnnouncePeer = "Ha" // Peers that were announced to us by a DHT.
-       PeerSourcePex             = "X"
-       // The peer was given directly, such as through a magnet link.
-       PeerSourceDirect = "M"
-)
-
-type peerRequestState struct {
-       data []byte
-}
-
-type PeerRemoteAddr interface {
-       String() string
-}
-
-// Since we have to store all the requests in memory, we can't reasonably exceed what would be
-// indexable with the memory space available.
-type (
-       maxRequests  = int
-       requestState = request_strategy.PeerNextRequestState
-)
-
-type Peer struct {
-       // First to ensure 64-bit alignment for atomics. See #262.
-       _stats ConnStats
-
-       t *Torrent
-
-       peerImpl
-       callbacks *Callbacks
-
-       outgoing   bool
-       Network    string
-       RemoteAddr PeerRemoteAddr
-       // True if the connection is operating over MSE obfuscation.
-       headerEncrypted bool
-       cryptoMethod    mse.CryptoMethod
-       Discovery       PeerSource
-       trusted         bool
-       closed          chansync.SetOnce
-       // Set true after we've added our ConnStats generated during handshake to
-       // other ConnStat instances as determined when the *Torrent became known.
-       reconciledHandshakeStats bool
-
-       lastMessageReceived     time.Time
-       completedHandshake      time.Time
-       lastUsefulChunkReceived time.Time
-       lastChunkSent           time.Time
-
-       // Stuff controlled by the local peer.
-       needRequestUpdate    string
-       actualRequestState   requestState
-       updateRequestsTimer  *time.Timer
-       cancelledRequests    roaring.Bitmap
-       lastBecameInterested time.Time
-       priorInterest        time.Duration
-
-       lastStartedExpectingToReceiveChunks time.Time
-       cumulativeExpectedToReceiveChunks   time.Duration
-       _chunksReceivedWhileExpecting       int64
-
-       choking                                bool
-       piecesReceivedSinceLastRequestUpdate   maxRequests
-       maxPiecesReceivedBetweenRequestUpdates maxRequests
-       // Chunks that we might reasonably expect to receive from the peer. Due to
-       // latency, buffering, and implementation differences, we may receive
-       // chunks that are no longer in the set of requests actually want.
-       validReceiveChunks map[RequestIndex]int
-       // Indexed by metadata piece, set to true if posted and pending a
-       // response.
-       metadataRequests []bool
-       sentHaves        bitmap.Bitmap
-
-       // Stuff controlled by the remote peer.
-       peerInterested        bool
-       peerChoking           bool
-       peerRequests          map[Request]*peerRequestState
-       PeerPrefersEncryption bool // as indicated by 'e' field in extension handshake
-       PeerListenPort        int
-       // The pieces the peer has claimed to have.
-       _peerPieces roaring.Bitmap
-       // The peer has everything. This can occur due to a special message, when
-       // we may not even know the number of pieces in the torrent yet.
-       peerSentHaveAll bool
-       // The highest possible number of pieces the torrent could have based on
-       // communication with the peer. Generally only useful until we have the
-       // torrent info.
-       peerMinPieces pieceIndex
-       // Pieces we've accepted chunks for from the peer.
-       peerTouchedPieces map[pieceIndex]struct{}
-       peerAllowedFast   roaring.Bitmap
-
-       PeerMaxRequests  maxRequests // Maximum pending requests the peer allows.
-       PeerExtensionIDs map[pp.ExtensionName]pp.ExtensionNumber
-       PeerClientName   string
-
-       logger log.Logger
-}
-
 // Maintains the state of a BitTorrent-protocol based connection with a peer.
 type PeerConn struct {
        Peer
@@ -142,8 +43,10 @@ type PeerConn struct {
        // See BEP 3 etc.
        PeerID             PeerID
        PeerExtensionBytes pp.PeerExtensionBits
+       PeerListenPort     int
 
-       // The actual Conn, used for closing, and setting socket options.
+       // The actual Conn, used for closing and setting socket options. Do not use methods on this
+       // while holding any mutexes.
        conn net.Conn
        // The Reader and Writer for this Conn, with hooks installed for stats,
        // limiting, deadlines etc.
@@ -152,51 +55,60 @@ type PeerConn struct {
 
        messageWriter peerConnMsgWriter
 
-       uploadTimer *time.Timer
-       pex         pexConnState
-}
+       PeerExtensionIDs map[pp.ExtensionName]pp.ExtensionNumber
+       PeerClientName   atomic.Value
+       uploadTimer      *time.Timer
+       pex              pexConnState
 
-func (cn *PeerConn) connStatusString() string {
-       return fmt.Sprintf("%+-55q %s %s", cn.PeerID, cn.PeerExtensionBytes, cn.connString)
-}
+       // The pieces the peer has claimed to have.
+       _peerPieces roaring.Bitmap
+       // The peer has everything. This can occur due to a special message, when
+       // we may not even know the number of pieces in the torrent yet.
+       peerSentHaveAll bool
 
-func (cn *Peer) updateExpectingChunks() {
-       if cn.expectingChunks() {
-               if cn.lastStartedExpectingToReceiveChunks.IsZero() {
-                       cn.lastStartedExpectingToReceiveChunks = time.Now()
-               }
-       } else {
-               if !cn.lastStartedExpectingToReceiveChunks.IsZero() {
-                       cn.cumulativeExpectedToReceiveChunks += time.Since(cn.lastStartedExpectingToReceiveChunks)
-                       cn.lastStartedExpectingToReceiveChunks = time.Time{}
-               }
-       }
+       peerRequestDataAllocLimiter alloclim.Limiter
+
+       outstandingHolepunchingRendezvous map[netip.AddrPort]struct{}
 }
 
-func (cn *Peer) expectingChunks() bool {
-       if cn.actualRequestState.Requests.IsEmpty() {
-               return false
+func (cn *PeerConn) pexStatus() string {
+       if !cn.bitExtensionEnabled(pp.ExtensionBitLtep) {
+               return "extended protocol disabled"
        }
-       if !cn.actualRequestState.Interested {
-               return false
+       if cn.PeerExtensionIDs == nil {
+               return "pending extended handshake"
        }
-       if !cn.peerChoking {
-               return true
+       if !cn.supportsExtension(pp.ExtensionNamePex) {
+               return "unsupported"
+       }
+       if true {
+               return fmt.Sprintf(
+                       "%v conns, %v unsent events",
+                       len(cn.pex.remoteLiveConns),
+                       cn.pex.numPending(),
+               )
+       } else {
+               // This alternative branch prints out the remote live conn addresses.
+               return fmt.Sprintf(
+                       "%v conns, %v unsent events",
+                       strings.Join(generics.SliceMap(
+                               maps.Keys(cn.pex.remoteLiveConns),
+                               func(from netip.AddrPort) string {
+                                       return from.String()
+                               }), ","),
+                       cn.pex.numPending(),
+               )
        }
-       haveAllowedFastRequests := false
-       cn.peerAllowedFast.Iterate(func(i uint32) bool {
-               haveAllowedFastRequests = roaringBitmapRangeCardinality(
-                       &cn.actualRequestState.Requests,
-                       cn.t.pieceRequestIndexOffset(pieceIndex(i)),
-                       cn.t.pieceRequestIndexOffset(pieceIndex(i+1)),
-               ) == 0
-               return !haveAllowedFastRequests
-       })
-       return haveAllowedFastRequests
 }
 
-func (cn *Peer) remoteChokingPiece(piece pieceIndex) bool {
-       return cn.peerChoking && !cn.peerAllowedFast.Contains(bitmap.BitIndex(piece))
+func (cn *PeerConn) peerImplStatusLines() []string {
+       return []string{
+               cn.connString,
+               fmt.Sprintf("peer id: %+q", cn.PeerID),
+               fmt.Sprintf("extensions: %v", cn.PeerExtensionBytes),
+               fmt.Sprintf("ltep extensions: %v", cn.PeerExtensionIDs),
+               fmt.Sprintf("pex: %s", cn.pexStatus()),
+       }
 }
 
 // Returns true if the connection is over IPv6.
@@ -208,64 +120,33 @@ func (cn *PeerConn) ipv6() bool {
        return len(ip) == net.IPv6len
 }
 
-// Returns true the if the dialer/initiator has the lower client peer ID. TODO: Find the
-// specification for this.
+// Returns true if the dialer/initiator has the higher client peer ID. See
+// https://github.com/arvidn/libtorrent/blame/272828e1cc37b042dfbbafa539222d8533e99755/src/bt_peer_connection.cpp#L3536-L3557.
+// As far as I can tell, Transmission just keeps the oldest connection.
 func (cn *PeerConn) isPreferredDirection() bool {
-       return bytes.Compare(cn.t.cl.peerID[:], cn.PeerID[:]) < 0 == cn.outgoing
+       // True if our client peer ID is higher than the remote's peer ID.
+       return bytes.Compare(cn.PeerID[:], cn.t.cl.peerID[:]) < 0 == cn.outgoing
 }
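The tie-break is compact enough to restate on its own. A minimal standalone sketch (hypothetical helper, not part of this diff), assuming 20-byte peer IDs:

        // The connection initiated by the side with the numerically higher
        // peer ID is the preferred direction.
        func preferOutgoing(localID, remoteID [20]byte, outgoing bool) bool {
                return bytes.Compare(remoteID[:], localID[:]) < 0 == outgoing
        }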
 
 // Returns whether the left connection should be preferred over the right one,
 // considering only their networking properties. A tie resolves to no
 // preference (false).
-func (l *PeerConn) hasPreferredNetworkOver(r *PeerConn) (left, ok bool) {
-       var ml multiLess
-       ml.NextBool(l.isPreferredDirection(), r.isPreferredDirection())
-       ml.NextBool(!l.utp(), !r.utp())
-       ml.NextBool(l.ipv6(), r.ipv6())
-       return ml.FinalOk()
+func (l *PeerConn) hasPreferredNetworkOver(r *PeerConn) bool {
+       var ml multiless.Computation
+       ml = ml.Bool(r.isPreferredDirection(), l.isPreferredDirection())
+       ml = ml.Bool(l.utp(), r.utp())
+       ml = ml.Bool(r.ipv6(), l.ipv6())
+       return ml.Less()
 }
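If I'm reading the multiless API correctly, false orders before true in each Bool leg, so the argument order encodes the preference: r.isPreferredDirection() first prefers the connection with the preferred direction, l.utp() first prefers TCP over uTP, and r.ipv6() first prefers IPv6. That reproduces the old NextBool(!l.utp(), !r.utp()) logic with the negations folded into the argument order.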
 
-func (cn *Peer) cumInterest() time.Duration {
-       ret := cn.priorInterest
-       if cn.actualRequestState.Interested {
-               ret += time.Since(cn.lastBecameInterested)
-       }
-       return ret
-}
-
-func (cn *Peer) peerHasAllPieces() (all bool, known bool) {
+func (cn *PeerConn) peerHasAllPieces() (all, known bool) {
        if cn.peerSentHaveAll {
                return true, true
        }
        if !cn.t.haveInfo() {
                return false, false
        }
-       return roaring.Flip(&cn._peerPieces, 0, bitmap.BitRange(cn.t.numPieces())).IsEmpty(), true
-}
-
-func (cn *Peer) locker() *lockWithDeferreds {
-       return cn.t.cl.locker()
-}
-
-func (cn *Peer) supportsExtension(ext pp.ExtensionName) bool {
-       _, ok := cn.PeerExtensionIDs[ext]
-       return ok
-}
-
-// The best guess at number of pieces in the torrent for this peer.
-func (cn *Peer) bestPeerNumPieces() pieceIndex {
-       if cn.t.haveInfo() {
-               return cn.t.numPieces()
-       }
-       return cn.peerMinPieces
-}
-
-func (cn *Peer) completedString() string {
-       have := pieceIndex(cn._peerPieces.GetCardinality())
-       if cn.peerSentHaveAll {
-               have = cn.bestPeerNumPieces()
-       }
-       return fmt.Sprintf("%d/%d", have, cn.bestPeerNumPieces())
+       return cn._peerPieces.GetCardinality() == uint64(cn.t.numPieces()), true
 }
 
 func (cn *PeerConn) onGotInfo(info *metainfo.Info) {
@@ -279,11 +160,8 @@ func (cn *PeerConn) setNumPieces(num pieceIndex) {
        cn.peerPiecesChanged()
 }
 
-func eventAgeString(t time.Time) string {
-       if t.IsZero() {
-               return "never"
-       }
-       return fmt.Sprintf("%.2fs ago", time.Since(t).Seconds())
+func (cn *PeerConn) peerPieces() *roaring.Bitmap {
+       return &cn._peerPieces
 }
 
 func (cn *PeerConn) connectionFlags() (ret string) {
@@ -306,136 +184,19 @@ func (cn *PeerConn) utp() bool {
        return parseNetworkString(cn.Network).Udp
 }
 
-// Inspired by https://github.com/transmission/transmission/wiki/Peer-Status-Text.
-func (cn *Peer) statusFlags() (ret string) {
-       c := func(b byte) {
-               ret += string([]byte{b})
-       }
-       if cn.actualRequestState.Interested {
-               c('i')
-       }
-       if cn.choking {
-               c('c')
-       }
-       c('-')
-       ret += cn.connectionFlags()
-       c('-')
-       if cn.peerInterested {
-               c('i')
-       }
-       if cn.peerChoking {
-               c('c')
-       }
-       return
-}
-
-func (cn *Peer) downloadRate() float64 {
-       num := cn._stats.BytesReadUsefulData.Int64()
-       if num == 0 {
-               return 0
-       }
-       return float64(num) / cn.totalExpectingTime().Seconds()
-}
-
-func (cn *Peer) numRequestsByPiece() (ret map[pieceIndex]int) {
-       ret = make(map[pieceIndex]int)
-       cn.actualRequestState.Requests.Iterate(func(x uint32) bool {
-               ret[pieceIndex(x/cn.t.chunksPerRegularPiece())]++
-               return true
-       })
-       return
-}
-
-func (cn *Peer) writeStatus(w io.Writer, t *Torrent) {
-       // \t isn't preserved in <pre> blocks?
-       if cn.closed.IsSet() {
-               fmt.Fprint(w, "CLOSED: ")
-       }
-       fmt.Fprintln(w, cn.connStatusString())
-       prio, err := cn.peerPriority()
-       prioStr := fmt.Sprintf("%08x", prio)
-       if err != nil {
-               prioStr += ": " + err.Error()
-       }
-       fmt.Fprintf(w, "    bep40-prio: %v\n", prioStr)
-       fmt.Fprintf(w, "    last msg: %s, connected: %s, last helpful: %s, itime: %s, etime: %s\n",
-               eventAgeString(cn.lastMessageReceived),
-               eventAgeString(cn.completedHandshake),
-               eventAgeString(cn.lastHelpful()),
-               cn.cumInterest(),
-               cn.totalExpectingTime(),
-       )
-       fmt.Fprintf(w,
-               "    %s completed, %d pieces touched, good chunks: %v/%v:%v reqq: %d-%v/(%d/%d):%d/%d, flags: %s, dr: %.1f KiB/s\n",
-               cn.completedString(),
-               len(cn.peerTouchedPieces),
-               &cn._stats.ChunksReadUseful,
-               &cn._stats.ChunksRead,
-               &cn._stats.ChunksWritten,
-               cn.actualRequestState.Requests.GetCardinality(),
-               cn.cancelledRequests.GetCardinality(),
-               cn.nominalMaxRequests(),
-               cn.PeerMaxRequests,
-               len(cn.peerRequests),
-               localClientReqq,
-               cn.statusFlags(),
-               cn.downloadRate()/(1<<10),
-       )
-       fmt.Fprintf(w, "    requested pieces:")
-       type pieceNumRequestsType struct {
-               piece       pieceIndex
-               numRequests int
-       }
-       var pieceNumRequests []pieceNumRequestsType
-       for piece, count := range cn.numRequestsByPiece() {
-               pieceNumRequests = append(pieceNumRequests, pieceNumRequestsType{piece, count})
-       }
-       sort.Slice(pieceNumRequests, func(i, j int) bool {
-               return pieceNumRequests[i].piece < pieceNumRequests[j].piece
-       })
-       for _, elem := range pieceNumRequests {
-               fmt.Fprintf(w, " %v(%v)", elem.piece, elem.numRequests)
-       }
-       fmt.Fprintf(w, "\n")
-}
-
-func (p *Peer) close() {
-       if !p.closed.Set() {
-               return
-       }
-       if p.updateRequestsTimer != nil {
-               p.updateRequestsTimer.Stop()
-       }
-       p.peerImpl.onClose()
-       if p.t != nil {
-               p.t.decPeerPieceAvailability(p)
-       }
-       for _, f := range p.callbacks.PeerClosed {
-               f(p)
-       }
-}
-
 func (cn *PeerConn) onClose() {
        if cn.pex.IsEnabled() {
                cn.pex.Close()
        }
        cn.tickleWriter()
        if cn.conn != nil {
-               cn.conn.Close()
+               go cn.conn.Close()
        }
        if cb := cn.callbacks.PeerConnClosed; cb != nil {
                cb(cn)
        }
 }
 
-func (cn *Peer) peerHasPiece(piece pieceIndex) bool {
-       return cn.peerSentHaveAll || cn._peerPieces.Contains(bitmap.BitIndex(piece))
-}
-
-// 64KiB, but temporarily less to work around an issue with WebRTC. TODO: Update when
-// https://github.com/pion/datachannel/issues/59 is fixed.
-const writeBufferHighWaterLen = 1 << 15
-
 // Writes a message into the write buffer. Returns whether it's okay to keep writing. Writing is
 // done asynchronously, so it may be that we're not able to honour backpressure from this method.
 func (cn *PeerConn) write(msg pp.Message) bool {
@@ -470,20 +231,6 @@ func (cn *PeerConn) requestedMetadataPiece(index int) bool {
        return index < len(cn.metadataRequests) && cn.metadataRequests[index]
 }
 
-// The actual value to use as the maximum outbound requests.
-func (cn *Peer) nominalMaxRequests() (ret maxRequests) {
-       return maxRequests(clamp(1, int64(cn.PeerMaxRequests), 2048))
-}
-
-func (cn *Peer) totalExpectingTime() (ret time.Duration) {
-       ret = cn.cumulativeExpectedToReceiveChunks
-       if !cn.lastStartedExpectingToReceiveChunks.IsZero() {
-               ret += time.Since(cn.lastStartedExpectingToReceiveChunks)
-       }
-       return
-
-}
-
 func (cn *PeerConn) onPeerSentCancel(r Request) {
        if _, ok := cn.peerRequests[r]; !ok {
                torrent.Add("unexpected cancels received", 1)
@@ -504,17 +251,19 @@ func (cn *PeerConn) choke(msg messageWriter) (more bool) {
        more = msg(pp.Message{
                Type: pp.Choke,
        })
-       if cn.fastEnabled() {
-               for r := range cn.peerRequests {
-                       // TODO: Don't reject pieces in allowed fast set.
-                       cn.reject(r)
-               }
-       } else {
-               cn.peerRequests = nil
+       if !cn.fastEnabled() {
+               cn.deleteAllPeerRequests()
        }
        return
 }
 
+func (cn *PeerConn) deleteAllPeerRequests() {
+       for _, state := range cn.peerRequests {
+               state.allocReservation.Drop()
+       }
+       cn.peerRequests = nil
+}
+
 func (cn *PeerConn) unchoke(msg func(pp.Message) bool) bool {
        if !cn.choking {
                return true
@@ -525,21 +274,6 @@ func (cn *PeerConn) unchoke(msg func(pp.Message) bool) bool {
        })
 }
 
-func (cn *Peer) setInterested(interested bool) bool {
-       if cn.actualRequestState.Interested == interested {
-               return true
-       }
-       cn.actualRequestState.Interested = interested
-       if interested {
-               cn.lastBecameInterested = time.Now()
-       } else if !cn.lastBecameInterested.IsZero() {
-               cn.priorInterest += time.Since(cn.lastBecameInterested)
-       }
-       cn.updateExpectingChunks()
-       // log.Printf("%p: setting interest: %v", cn, interested)
-       return cn.writeInterested(interested)
-}
-
 func (pc *PeerConn) writeInterested(interested bool) bool {
        return pc.write(pp.Message{
                Type: func() pp.MessageType {
@@ -552,57 +286,6 @@ func (pc *PeerConn) writeInterested(interested bool) bool {
        })
 }
 
-// The function takes a message to be sent, and returns true if more messages
-// are okay.
-type messageWriter func(pp.Message) bool
-
-func (cn *Peer) shouldRequest(r RequestIndex) error {
-       pi := pieceIndex(r / cn.t.chunksPerRegularPiece())
-       if !cn.peerHasPiece(pi) {
-               return errors.New("requesting piece peer doesn't have")
-       }
-       if !cn.t.peerIsActive(cn) {
-               panic("requesting but not in active conns")
-       }
-       if cn.closed.IsSet() {
-               panic("requesting when connection is closed")
-       }
-       if cn.t.hashingPiece(pi) {
-               panic("piece is being hashed")
-       }
-       if cn.t.pieceQueuedForHash(pi) {
-               panic("piece is queued for hash")
-       }
-       if cn.peerChoking && !cn.peerAllowedFast.Contains(bitmap.BitIndex(pi)) {
-               panic("peer choking and piece not allowed fast")
-       }
-       return nil
-}
-
-func (cn *Peer) request(r RequestIndex) (more bool, err error) {
-       if err := cn.shouldRequest(r); err != nil {
-               panic(err)
-       }
-       if cn.actualRequestState.Requests.Contains(r) {
-               return true, nil
-       }
-       if maxRequests(cn.actualRequestState.Requests.GetCardinality()) >= cn.nominalMaxRequests() {
-               return true, errors.New("too many outstanding requests")
-       }
-       cn.actualRequestState.Requests.Add(r)
-       if cn.validReceiveChunks == nil {
-               cn.validReceiveChunks = make(map[RequestIndex]int)
-       }
-       cn.validReceiveChunks[r]++
-       cn.t.pendingRequests.Inc(r)
-       cn.updateExpectingChunks()
-       ppReq := cn.t.requestIndexToRequest(r)
-       for _, f := range cn.callbacks.SentRequest {
-               f(PeerRequestEvent{cn, ppReq})
-       }
-       return cn.peerImpl._request(ppReq), nil
-}
-
 func (me *PeerConn) _request(r Request) bool {
        return me.write(pp.Message{
                Type:   pp.Request,
@@ -612,35 +295,23 @@ func (me *PeerConn) _request(r Request) bool {
        })
 }
 
-func (me *Peer) cancel(r RequestIndex) bool {
-       if !me.actualRequestState.Requests.Contains(r) {
-               return true
-       }
-       return me._cancel(r)
-}
-
 func (me *PeerConn) _cancel(r RequestIndex) bool {
-       if me.cancelledRequests.Contains(r) {
-               // Already cancelled and waiting for a response.
-               return true
-       }
-       if me.fastEnabled() {
-               me.cancelledRequests.Add(r)
-       } else {
-               if !me.deleteRequest(r) {
-                       panic("request not existing should have been guarded")
-               }
-               if me.actualRequestState.Requests.IsEmpty() {
-                       me.updateRequests("Peer.cancel")
-               }
-       }
-       return me.write(makeCancelMessage(me.t.requestIndexToRequest(r)))
+       me.write(makeCancelMessage(me.t.requestIndexToRequest(r)))
+       // Transmission does not send rejects for received cancels. See
+       // https://github.com/transmission/transmission/pull/2275.
+       return me.fastEnabled() && !me.remoteIsTransmission()
 }
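The return value now answers "should we expect a response to this cancel": only fast-extension peers reject cancelled requests, and per the linked PR Transmission never does. remoteIsTransmission itself isn't shown in this diff; presumably it sniffs the Azureus-style peer ID prefix. A hypothetical sketch:

        // Hypothetical: Azureus-style peer IDs begin "-XXnnnn-", where
        // Transmission's client code is "-TR".
        func looksLikeTransmission(peerID [20]byte) bool {
                return bytes.HasPrefix(peerID[:], []byte("-TR"))
        }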
 
 func (cn *PeerConn) fillWriteBuffer() {
-       if !cn.maybeUpdateActualRequestState() {
-               return
-       }
+       if cn.messageWriter.writeBuffer.Len() > writeBufferLowWaterLen {
+               // Fully committing to our max requests requires sufficient space (see
+               // maxLocalToRemoteRequests). Flush what we have instead. We also prefer always to make
+               // requests than to do PEX or upload, so we short-circuit before handling those. Any update
+               // request reason will not be cleared, so we'll come right back here when there's space. We
+               // can't do this in maybeUpdateActualRequestState because it's a method on Peer and has no
+               // knowledge of write buffers.
+               return
+       }
+       cn.maybeUpdateActualRequestState()
        if cn.pex.IsEnabled() {
                if flow := cn.pex.Share(cn.write); !flow {
                        return
@@ -674,47 +345,11 @@ func (cn *PeerConn) postBitfield() {
        cn.sentHaves = bitmap.Bitmap{cn.t._completedPieces.Clone()}
 }
 
-// Sets a reason to update requests, and if there wasn't already one, handle it.
-func (cn *Peer) updateRequests(reason string) {
-       if cn.needRequestUpdate != "" {
-               return
-       }
-       cn.needRequestUpdate = reason
-       cn.handleUpdateRequests()
-}
-
 func (cn *PeerConn) handleUpdateRequests() {
        // The writer determines the request state as needed when it can write.
        cn.tickleWriter()
 }
 
-// Emits the indices in the Bitmaps bms in order, never repeating any index.
-// skip is mutated during execution, and its initial values will never be
-// emitted.
-func iterBitmapsDistinct(skip *bitmap.Bitmap, bms ...bitmap.Bitmap) iter.Func {
-       return func(cb iter.Callback) {
-               for _, bm := range bms {
-                       if !iter.All(
-                               func(_i interface{}) bool {
-                                       i := _i.(int)
-                                       if skip.Contains(bitmap.BitIndex(i)) {
-                                               return true
-                                       }
-                                       skip.Add(bitmap.BitIndex(i))
-                                       return cb(i)
-                               },
-                               bm.Iter,
-                       ) {
-                               return
-                       }
-               }
-       }
-}
-
-func (cn *Peer) peerPiecesChanged() {
-       cn.t.maybeDropMutuallyCompletePeer(cn)
-}
-
 func (cn *PeerConn) raisePeerMinPieces(newMin pieceIndex) {
        if newMin > cn.peerMinPieces {
                cn.peerMinPieces = newMin
@@ -750,42 +385,62 @@ func (cn *PeerConn) peerSentBitfield(bf []bool) error {
                // Ignore known excess pieces.
                bf = bf[:cn.t.numPieces()]
        }
-       pp := cn.newPeerPieces()
+       bm := boolSliceToBitmap(bf)
+       if cn.t.haveInfo() && pieceIndex(bm.GetCardinality()) == cn.t.numPieces() {
+               cn.onPeerHasAllPieces()
+               return nil
+       }
+       if !bm.IsEmpty() {
+               cn.raisePeerMinPieces(pieceIndex(bm.Maximum()) + 1)
+       }
+       shouldUpdateRequests := false
+       if cn.peerSentHaveAll {
+               if !cn.t.deleteConnWithAllPieces(&cn.Peer) {
+                       panic(cn)
+               }
+               cn.peerSentHaveAll = false
+               if !cn._peerPieces.IsEmpty() {
+                       panic("if peer has all, we expect no individual peer pieces to be set")
+               }
+       } else {
+               bm.Xor(&cn._peerPieces)
+       }
        cn.peerSentHaveAll = false
-       for i, have := range bf {
-               if have {
-                       cn.raisePeerMinPieces(pieceIndex(i) + 1)
-                       if !pp.Contains(bitmap.BitIndex(i)) {
-                               cn.t.incPieceAvailability(i)
-                       }
+       // bm is now 'on' for pieces that are changing
+       bm.Iterate(func(x uint32) bool {
+               pi := pieceIndex(x)
+               if cn._peerPieces.Contains(x) {
+                       // Then we must be losing this piece
+                       cn.t.decPieceAvailability(pi)
                } else {
-                       if pp.Contains(bitmap.BitIndex(i)) {
-                               cn.t.decPieceAvailability(i)
+                       if !shouldUpdateRequests && cn.t.wantPieceIndex(pieceIndex(x)) {
+                               shouldUpdateRequests = true
                        }
+                       // We must be gaining this piece
+                       cn.t.incPieceAvailability(pieceIndex(x))
                }
-               if have {
-                       cn._peerPieces.Add(uint32(i))
-                       if cn.t.wantPieceIndex(i) {
-                               cn.updateRequests("bitfield")
-                       }
-               } else {
-                       cn._peerPieces.Remove(uint32(i))
-               }
+               return true
+       })
+       // Apply the changes. If we had everything previously, this should be empty, so xor is the same
+       // as or.
+       cn._peerPieces.Xor(&bm)
+       if shouldUpdateRequests {
+               cn.updateRequests("bitfield")
        }
+       // We didn't guard this before, and I see no reason to do it now.
        cn.peerPiecesChanged()
        return nil
 }
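The bitmap algebra is easy to sanity-check on toy values: the XOR of the old and new piece sets is exactly the set of pieces changing state, and XOR-ing that delta back in applies the change.

        old := roaring.BitmapOf(0, 2)
        updated := roaring.BitmapOf(0, 1)
        delta := roaring.Xor(old, updated) // {1, 2}: piece 1 gained, piece 2 lost
        old.Xor(delta)                     // old is now {0, 1}, matching updated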
 
-func (cn *Peer) onPeerHasAllPieces() {
+func (cn *PeerConn) onPeerHasAllPieces() {
        t := cn.t
        if t.haveInfo() {
-               npp, pc := cn.newPeerPieces(), t.numPieces()
-               for i := 0; i < pc; i += 1 {
-                       if !npp.Contains(bitmap.BitIndex(i)) {
-                               t.incPieceAvailability(i)
-                       }
-               }
+               cn._peerPieces.Iterate(func(x uint32) bool {
+                       t.decPieceAvailability(pieceIndex(x))
+                       return true
+               })
        }
+       t.addConnWithAllPieces(&cn.Peer)
        cn.peerSentHaveAll = true
        cn._peerPieces.Clear()
        if !cn.t._pendingPieces.IsEmpty() {
@@ -800,7 +455,9 @@ func (cn *PeerConn) onPeerSentHaveAll() error {
 }
 
 func (cn *PeerConn) peerSentHaveNone() error {
-       cn.t.decPeerPieceAvailability(&cn.Peer)
+       if !cn.peerSentHaveAll {
+               cn.t.decPeerPieceAvailability(&cn.Peer)
+       }
        cn._peerPieces.Clear()
        cn.peerSentHaveAll = false
        cn.peerPiecesChanged()
@@ -841,59 +498,10 @@ func (cn *PeerConn) wroteMsg(msg *pp.Message) {
        cn.allStats(func(cs *ConnStats) { cs.wroteMsg(msg) })
 }
 
-// After handshake, we know what Torrent and Client stats to include for a
-// connection.
-func (cn *Peer) postHandshakeStats(f func(*ConnStats)) {
-       t := cn.t
-       f(&t.stats)
-       f(&t.cl.stats)
-}
-
-// All ConnStats that include this connection. Some objects are not known
-// until the handshake is complete, after which it's expected to reconcile the
-// differences.
-func (cn *Peer) allStats(f func(*ConnStats)) {
-       f(&cn._stats)
-       if cn.reconciledHandshakeStats {
-               cn.postHandshakeStats(f)
-       }
-}
-
 func (cn *PeerConn) wroteBytes(n int64) {
        cn.allStats(add(n, func(cs *ConnStats) *Count { return &cs.BytesWritten }))
 }
 
-func (cn *PeerConn) readBytes(n int64) {
-       cn.allStats(add(n, func(cs *ConnStats) *Count { return &cs.BytesRead }))
-}
-
-// Returns whether the connection could be useful to us. We're seeding and
-// they want data, we don't have metainfo and they can provide it, etc.
-func (c *Peer) useful() bool {
-       t := c.t
-       if c.closed.IsSet() {
-               return false
-       }
-       if !t.haveInfo() {
-               return c.supportsExtension("ut_metadata")
-       }
-       if t.seeding() && c.peerInterested {
-               return true
-       }
-       if c.peerHasWantedPieces() {
-               return true
-       }
-       return false
-}
-
-func (c *Peer) lastHelpful() (ret time.Time) {
-       ret = c.lastUsefulChunkReceived
-       if c.t.seeding() && c.lastChunkSent.After(ret) {
-               ret = c.lastChunkSent
-       }
-       return
-}
-
 func (c *PeerConn) fastEnabled() bool {
        return c.PeerExtensionBytes.SupportsFast() && c.t.cl.config.Extensions.SupportsFast()
 }
@@ -903,13 +511,29 @@ func (c *PeerConn) reject(r Request) {
                panic("fast not enabled")
        }
        c.write(r.ToMsg(pp.Reject))
-       delete(c.peerRequests, r)
+       // It is possible to reject a request before it is added to peer requests due to being invalid.
+       if state, ok := c.peerRequests[r]; ok {
+               state.allocReservation.Drop()
+               delete(c.peerRequests, r)
+       }
 }
 
-func (c *PeerConn) onReadRequest(r Request) error {
+func (c *PeerConn) maximumPeerRequestChunkLength() (_ Option[int]) {
+       uploadRateLimiter := c.t.cl.config.UploadRateLimiter
+       if uploadRateLimiter.Limit() == rate.Inf {
+               return
+       }
+       return Some(uploadRateLimiter.Burst())
+}
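Burst() is the natural cap here: a token bucket never admits a single reservation larger than its burst, so a peer request beyond it could never be served. A quick sketch with x/time/rate (values illustrative):

        limiter := rate.NewLimiter(rate.Limit(1<<20), 1<<17) // 1 MiB/s, 128 KiB burst
        // WaitN with n > limiter.Burst() fails immediately for a finite
        // limit, so longer chunk requests are rejected up front instead.
        fmt.Println(limiter.Burst()) // 131072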
+
+// startFetch is for testing purposes currently.
+func (c *PeerConn) onReadRequest(r Request, startFetch bool) error {
        requestedChunkLengths.Add(strconv.FormatUint(r.Length.Uint64(), 10), 1)
        if _, ok := c.peerRequests[r]; ok {
                torrent.Add("duplicate requests received", 1)
+               if c.fastEnabled() {
+                       return errors.New("received duplicate request with fast enabled")
+               }
                return nil
        }
        if c.choking {
@@ -929,30 +553,52 @@ func (c *PeerConn) onReadRequest(r Request) error {
                // BEP 6 says we may close here if we choose.
                return nil
        }
+       if opt := c.maximumPeerRequestChunkLength(); opt.Ok && int(r.Length) > opt.Value {
+               err := fmt.Errorf("peer requested chunk too long (%v)", r.Length)
+               c.logger.Levelf(log.Warning, err.Error())
+               if c.fastEnabled() {
+                       c.reject(r)
+                       return nil
+               } else {
+                       return err
+               }
+       }
        if !c.t.havePiece(pieceIndex(r.Index)) {
-               // This isn't necessarily them screwing up. We can drop pieces
-               // from our storage, and can't communicate this to peers
-               // except by reconnecting.
+               // TODO: Tell the peer we don't have the piece, and reject this request.
                requestsReceivedForMissingPieces.Add(1)
                return fmt.Errorf("peer requested piece we don't have: %v", r.Index.Int())
        }
+       pieceLength := c.t.pieceLength(pieceIndex(r.Index))
        // Check this after we know we have the piece, so that the piece length will be known.
-       if r.Begin+r.Length > c.t.pieceLength(pieceIndex(r.Index)) {
+       if chunkOverflowsPiece(r.ChunkSpec, pieceLength) {
                torrent.Add("bad requests received", 1)
-               return errors.New("bad Request")
+               return errors.New("chunk overflows piece")
        }
        if c.peerRequests == nil {
                c.peerRequests = make(map[Request]*peerRequestState, localClientReqq)
        }
-       value := &peerRequestState{}
+       value := &peerRequestState{
+               allocReservation: c.peerRequestDataAllocLimiter.Reserve(int64(r.Length)),
+       }
        c.peerRequests[r] = value
-       go c.peerRequestDataReader(r, value)
-       //c.tickleWriter()
+       if startFetch {
+               // TODO: Limit peer request data read concurrency.
+               go c.peerRequestDataReader(r, value)
+       }
        return nil
 }
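Piecing together the alloclim calls that appear in this diff, a reservation's lifecycle is roughly the following (a sketch, not the exact control flow):

        res := c.peerRequestDataAllocLimiter.Reserve(int64(r.Length)) // on Request, above
        if err := res.Wait(ctx); err != nil {
                return // cancelled, or the reservation can never be satisfied
        }
        // ...read the chunk from storage, then exactly one of:
        res.Release() // the chunk was sent (see sendChunk below)
        res.Drop()    // rejected, choked away, or the connection closed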
 
 func (c *PeerConn) peerRequestDataReader(r Request, prs *peerRequestState) {
-       b, err := readPeerRequestData(r, c)
+       // Should we depend on Torrent closure here? I think it's okay to get cancelled from elsewhere,
+       // or to fail the read and then clean up. Also, we used to hang here if the reservation was
+       // never dropped; that has since been fixed.
+       ctx := context.Background()
+       err := prs.allocReservation.Wait(ctx)
+       if err != nil {
+               c.logger.WithDefaultLevel(log.Debug).Levelf(log.ErrorLevel(err), "waiting for alloc limit reservation: %v", err)
+               return
+       }
+       b, err := c.readPeerRequestData(r)
        c.locker().Lock()
        defer c.locker().Unlock()
        if err != nil {
@@ -961,7 +607,9 @@ func (c *PeerConn) peerRequestDataReader(r Request, prs *peerRequestState) {
                if b == nil {
                        panic("data must be non-nil to trigger send")
                }
+               torrent.Add("peer request data read successes", 1)
                prs.data = b
+               // This might be required for the error case too (#752 and #753).
                c.tickleWriter()
        }
 }
@@ -969,7 +617,17 @@ func (c *PeerConn) peerRequestDataReader(r Request, prs *peerRequestState) {
 // If this is maintained correctly, we might be able to support optional synchronous reading for
 // chunk sending, the way it used to work.
 func (c *PeerConn) peerRequestDataReadFailed(err error, r Request) {
-       c.logger.WithDefaultLevel(log.Warning).Printf("error reading chunk for peer Request %v: %v", r, err)
+       torrent.Add("peer request data read failures", 1)
+       logLevel := log.Warning
+       if c.t.hasStorageCap() {
+               // It's expected that pieces might drop. See
+               // https://github.com/anacrolix/torrent/issues/702#issuecomment-1000953313.
+               logLevel = log.Debug
+       }
+       c.logger.Levelf(logLevel, "error reading chunk for peer Request %v: %v", r, err)
+       if c.t.closed.IsSet() {
+               return
+       }
        i := pieceIndex(r.Index)
        if c.t.pieceComplete(i) {
                // There used to be more code here that just duplicated the following break. Piece
@@ -978,17 +636,26 @@ func (c *PeerConn) peerRequestDataReadFailed(err error, r Request) {
                // here.
                c.t.updatePieceCompletion(i)
        }
-       // If we failed to send a chunk, choke the peer to ensure they flush all their requests. We've
-       // probably dropped a piece from storage, but there's no way to communicate this to the peer. If
-       // they ask for it again, we'll kick them to allow us to send them an updated bitfield on the
-       // next connect. TODO: Support rejecting here too.
-       if c.choking {
-               c.logger.WithDefaultLevel(log.Warning).Printf("already choking peer, requests might not be rejected correctly")
+       // We've probably dropped a piece from storage, but there's no way to communicate this to the
+       // peer. If they ask for it again, we kick them, allowing us to send them updated piece states
+       // if we reconnect. TODO: Instead, we could just try to update them with Bitfield or HaveNone,
+       // and if they kick us for breaking protocol, on reconnect we will be compliant again (at least
+       // initially).
+       if c.fastEnabled() {
+               c.reject(r)
+       } else {
+               if c.choking {
+                       // If fast isn't enabled, I think we would have wiped all peer requests when we last
+                       // choked, and requests while we're choking would be ignored. It's possible that
+                       // a peer request data read completed concurrently with its deletion elsewhere.
+                       c.logger.WithDefaultLevel(log.Warning).Printf("already choking peer, requests might not be rejected correctly")
+               }
+               // Choking a non-fast peer should cause them to flush all their requests.
+               c.choke(c.write)
        }
-       c.choke(c.write)
 }
 
-func readPeerRequestData(r Request, c *PeerConn) ([]byte, error) {
+func (c *PeerConn) readPeerRequestData(r Request) ([]byte, error) {
        b := make([]byte, r.Length)
        p := c.t.info.Piece(int(r.Index))
        n, err := c.t.readAt(b, p.Offset()+int64(r.Begin))
@@ -1004,18 +671,10 @@ func readPeerRequestData(r Request, c *PeerConn) ([]byte, error) {
        return b, err
 }
 
-func runSafeExtraneous(f func()) {
-       if true {
-               go f()
-       } else {
-               f()
-       }
-}
-
 func (c *PeerConn) logProtocolBehaviour(level log.Level, format string, arg ...interface{}) {
-       c.logger.WithLevel(level).WithContextText(fmt.Sprintf(
-               "peer id %q, ext v %q", c.PeerID, c.PeerClientName,
-       )).SkipCallers(1).Printf(format, arg...)
+       c.logger.WithContextText(fmt.Sprintf(
+               "peer id %q, ext v %q", c.PeerID, c.PeerClientName.Load(),
+       )).SkipCallers(1).Levelf(level, format, arg...)
 }
 
 // Processes incoming BitTorrent wire-protocol messages. The client lock is held upon entry and
@@ -1033,7 +692,7 @@ func (c *PeerConn) mainReadLoop() (err error) {
 
        decoder := pp.Decoder{
                R:         bufio.NewReaderSize(c.r, 1<<17),
-               MaxLength: 256 * 1024,
+               MaxLength: 4 * pp.Integer(max(int64(t.chunkSize), defaultChunkSize)),
                Pool:      &t.chunkPool,
        }
        for {
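With the stock 16 KiB chunk size (defaultChunkSize is 1 << 14), the new bound works out to 4 × 16 KiB = 64 KiB per wire message, much tighter than the old fixed 256 KiB. Note that the untyped max builtin used here requires Go 1.21, presumably what the commit title "Drop support for go 1.20" is about.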
@@ -1068,18 +727,16 @@ func (c *PeerConn) mainReadLoop() (err error) {
                                break
                        }
                        if !c.fastEnabled() {
-                               c.deleteAllRequests()
+                               c.deleteAllRequests("choked by non-fast PeerConn")
                        } else {
-                               c.actualRequestState.Requests.Iterate(func(x uint32) bool {
-                                       if !c.peerAllowedFast.Contains(x / c.t.chunksPerRegularPiece()) {
-                                               c.t.pendingRequests.Dec(x)
-                                       }
-                                       return true
-                               })
+                               // We don't decrement pending requests here, let's wait for the peer to either
+                               // reject or satisfy the outstanding requests. Additionally, some peers may unchoke
+                               // us and resume where they left off, we don't want to have piled on to those chunks
+                               // in the meanwhile. I think a peer's ability to abuse this should be limited: they
+                               // could let us request a lot of stuff, then choke us and never reject, but they're
+                               // only a single peer, our chunk balancing should smooth over this abuse.
                        }
                        c.peerChoking = true
-                       // We can then reset our interest.
-                       c.updateRequests("choked")
                        c.updateExpectingChunks()
                case pp.Unchoke:
                        if !c.peerChoking {
@@ -1090,23 +747,25 @@ func (c *PeerConn) mainReadLoop() (err error) {
                        }
                        c.peerChoking = false
                        preservedCount := 0
-                       c.actualRequestState.Requests.Iterate(func(x uint32) bool {
-                               if !c.peerAllowedFast.Contains(x / c.t.chunksPerRegularPiece()) {
+                       c.requestState.Requests.Iterate(func(x RequestIndex) bool {
+                               if !c.peerAllowedFast.Contains(c.t.pieceIndexOfRequestIndex(x)) {
                                        preservedCount++
-                                       c.t.pendingRequests.Inc(x)
                                }
                                return true
                        })
                        if preservedCount != 0 {
                                // TODO: Yes this is a debug log but I'm not happy with the state of the logging lib
                                // right now.
-                               c.logger.WithLevel(log.Debug).Printf(
+                               c.logger.Levelf(log.Debug,
                                        "%v requests were preserved while being choked (fast=%v)",
                                        preservedCount,
                                        c.fastEnabled())
+
                                torrent.Add("requestsPreservedThroughChoking", int64(preservedCount))
                        }
-                       c.updateRequests("unchoked")
+                       if !c.t._pendingPieces.IsEmpty() {
+                               c.updateRequests("unchoked")
+                       }
                        c.updateExpectingChunks()
                case pp.Interested:
                        c.peerInterested = true
@@ -1122,7 +781,10 @@ func (c *PeerConn) mainReadLoop() (err error) {
                        err = c.peerSentBitfield(msg.Bitfield)
                case pp.Request:
                        r := newRequestFromMessage(&msg)
-                       err = c.onReadRequest(r)
+                       err = c.onReadRequest(r, true)
+                       if err != nil {
+                               err = fmt.Errorf("on reading request %v: %w", r, err)
+                       }
                case pp.Piece:
                        c.doChunkReadStats(int64(len(msg.Piece)))
                        err = c.receiveChunk(&msg)
@@ -1152,35 +814,21 @@ func (c *PeerConn) mainReadLoop() (err error) {
                        })
                case pp.Suggest:
                        torrent.Add("suggests received", 1)
-                       log.Fmsg("peer suggested piece %d", msg.Index).AddValues(c, msg.Index).SetLevel(log.Debug).Log(c.t.logger)
+                       log.Fmsg("peer suggested piece %d", msg.Index).AddValues(c, msg.Index).LogLevel(log.Debug, c.t.logger)
                        c.updateRequests("suggested")
                case pp.HaveAll:
                        err = c.onPeerSentHaveAll()
                case pp.HaveNone:
                        err = c.peerSentHaveNone()
                case pp.Reject:
-                       c.remoteRejectedRequest(c.t.requestIndexFromRequest(newRequestFromMessage(&msg)))
+                       req := newRequestFromMessage(&msg)
+                       if !c.remoteRejectedRequest(c.t.requestIndexFromRequest(req)) {
+                               err = fmt.Errorf("received invalid reject for request %v", req)
+                               c.logger.Levelf(log.Debug, "%v", err)
+                       }
                case pp.AllowedFast:
                        torrent.Add("allowed fasts received", 1)
-                       log.Fmsg("peer allowed fast: %d", msg.Index).AddValues(c).SetLevel(log.Debug).Log(c.t.logger)
-                       pieceIndex := msg.Index.Int()
-                       // If we have outstanding requests that aren't currently counted toward the combined
-                       // outstanding request count, increment them.
-                       if c.peerAllowedFast.CheckedAdd(msg.Index.Uint32()) && c.peerChoking &&
-                               // The check here could be against having the info, but really what we need to know
-                               // is if there are any existing requests.
-                               !c.actualRequestState.Requests.IsEmpty() {
-
-                               i := c.actualRequestState.Requests.Iterator()
-                               i.AdvanceIfNeeded(t.pieceRequestIndexOffset(pieceIndex))
-                               for i.HasNext() {
-                                       r := i.Next()
-                                       if r >= t.pieceRequestIndexOffset(pieceIndex+1) {
-                                               break
-                                       }
-                                       c.t.pendingRequests.Inc(r)
-                               }
-                       }
+                       log.Fmsg("peer allowed fast: %d", msg.Index).AddValues(c).LogLevel(log.Debug, c.t.logger)
                        c.updateRequests("PeerConn.mainReadLoop allowed fast")
                case pp.Extended:
                        err = c.onReadExtendedMsg(msg.ExtendedID, msg.ExtendedPayload)
@@ -1193,26 +841,6 @@ func (c *PeerConn) mainReadLoop() (err error) {
        }
 }
 
-func (c *Peer) remoteRejectedRequest(r RequestIndex) {
-       if c.deleteRequest(r) {
-               if c.actualRequestState.Requests.IsEmpty() {
-                       c.updateRequests("Peer.remoteRejectedRequest")
-               }
-               c.decExpectedChunkReceive(r)
-       }
-}
-
-func (c *Peer) decExpectedChunkReceive(r RequestIndex) {
-       count := c.validReceiveChunks[r]
-       if count == 1 {
-               delete(c.validReceiveChunks, r)
-       } else if count > 1 {
-               c.validReceiveChunks[r] = count - 1
-       } else {
-               panic(r)
-       }
-}
-
 func (c *PeerConn) onReadExtendedMsg(id pp.ExtensionNumber, payload []byte) (err error) {
        defer func() {
                // TODO: Should we still do this?
@@ -1236,11 +864,11 @@ func (c *PeerConn) onReadExtendedMsg(id pp.ExtensionNumber, payload []byte) (err
                if cb := c.callbacks.ReadExtendedHandshake; cb != nil {
                        cb(c, &d)
                }
-               //c.logger.WithDefaultLevel(log.Debug).Printf("received extended handshake message:\n%s", spew.Sdump(d))
+               // c.logger.WithDefaultLevel(log.Debug).Printf("received extended handshake message:\n%s", spew.Sdump(d))
                if d.Reqq != 0 {
                        c.PeerMaxRequests = d.Reqq
                }
-               c.PeerClientName = d.V
+               c.PeerClientName.Store(d.V)
                if c.PeerExtensionIDs == nil {
                        c.PeerExtensionIDs = make(map[pp.ExtensionName]pp.ExtensionNumber, len(d.M))
                }
@@ -1248,7 +876,11 @@ func (c *PeerConn) onReadExtendedMsg(id pp.ExtensionNumber, payload []byte) (err
                c.PeerPrefersEncryption = d.Encryption
                for name, id := range d.M {
                        if _, ok := c.PeerExtensionIDs[name]; !ok {
-                               peersSupportingExtension.Add(string(name), 1)
+                               peersSupportingExtension.Add(
+                                       // expvar.Var.String must produce valid JSON. "ut_payme\xeet_address" was being
+                                       // entered here which caused problems later when unmarshalling.
+                                       strconv.Quote(string(name)),
+                                       1)
                        }
                        c.PeerExtensionIDs[name] = id
                }
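A quick illustration of the quoting fix: extension names arrive from the wire and need not be valid UTF-8, while expvar keys must survive JSON rendering. strconv.Quote turns the stray byte into a printable escape:

        fmt.Println(strconv.Quote("ut_payme\xeet_address"))
        // the 0xEE byte is rendered as the four ASCII characters \xee
        // rather than being emitted raw into the expvar output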
@@ -1260,6 +892,7 @@ func (c *PeerConn) onReadExtendedMsg(id pp.ExtensionNumber, payload []byte) (err
                c.requestPendingMetadata()
                if !t.cl.config.DisablePEX {
                        t.pex.Add(c) // we learnt enough now
+                       // This checks the extension is supported internally.
                        c.pex.Init(c)
                }
                return nil
@@ -1273,7 +906,20 @@ func (c *PeerConn) onReadExtendedMsg(id pp.ExtensionNumber, payload []byte) (err
                if !c.pex.IsEnabled() {
                        return nil // or hang-up maybe?
                }
-               return c.pex.Recv(payload)
+               err = c.pex.Recv(payload)
+               if err != nil {
+                       err = fmt.Errorf("receiving pex message: %w", err)
+               }
+               return
+       case utHolepunchExtendedId:
+               var msg utHolepunch.Msg
+               err = msg.UnmarshalBinary(payload)
+               if err != nil {
+                       err = fmt.Errorf("unmarshalling ut_holepunch message: %w", err)
+                       return
+               }
+               err = c.t.handleReceivedUtHolepunchMsg(msg, c)
+               return
        default:
                return fmt.Errorf("unexpected extended message ID: %v", id)
        }
@@ -1293,149 +939,6 @@ func (cn *PeerConn) rw() io.ReadWriter {
        }{cn.r, cn.w}
 }
 
-func (c *Peer) doChunkReadStats(size int64) {
-       c.allStats(func(cs *ConnStats) { cs.receivedChunk(size) })
-}
-
-// Handle a received chunk from a peer.
-func (c *Peer) receiveChunk(msg *pp.Message) error {
-       chunksReceived.Add("total", 1)
-
-       ppReq := newRequestFromMessage(msg)
-       req := c.t.requestIndexFromRequest(ppReq)
-
-       if c.peerChoking {
-               chunksReceived.Add("while choked", 1)
-       }
-
-       if c.validReceiveChunks[req] <= 0 {
-               chunksReceived.Add("unexpected", 1)
-               return errors.New("received unexpected chunk")
-       }
-       c.decExpectedChunkReceive(req)
-
-       if c.peerChoking && c.peerAllowedFast.Contains(bitmap.BitIndex(ppReq.Index)) {
-               chunksReceived.Add("due to allowed fast", 1)
-       }
-
-       // The request needs to be deleted immediately to prevent cancels occurring asynchronously when
-       // have actually already received the piece, while we have the Client unlocked to write the data
-       // out.
-       deletedRequest := false
-       {
-               if c.actualRequestState.Requests.Contains(req) {
-                       for _, f := range c.callbacks.ReceivedRequested {
-                               f(PeerMessageEvent{c, msg})
-                       }
-               }
-               // Request has been satisfied.
-               if c.deleteRequest(req) {
-                       deletedRequest = true
-                       if !c.peerChoking {
-                               c._chunksReceivedWhileExpecting++
-                       }
-                       if c.actualRequestState.Requests.IsEmpty() {
-                               c.updateRequests("Peer.receiveChunk deleted request")
-                       }
-               } else {
-                       chunksReceived.Add("unwanted", 1)
-               }
-       }
-
-       t := c.t
-       cl := t.cl
-
-       // Do we actually want this chunk?
-       if t.haveChunk(ppReq) {
-               chunksReceived.Add("wasted", 1)
-               c.allStats(add(1, func(cs *ConnStats) *Count { return &cs.ChunksReadWasted }))
-               return nil
-       }
-
-       piece := &t.pieces[ppReq.Index]
-
-       c.allStats(add(1, func(cs *ConnStats) *Count { return &cs.ChunksReadUseful }))
-       c.allStats(add(int64(len(msg.Piece)), func(cs *ConnStats) *Count { return &cs.BytesReadUsefulData }))
-       if deletedRequest {
-               c.piecesReceivedSinceLastRequestUpdate++
-               c.allStats(add(int64(len(msg.Piece)), func(cs *ConnStats) *Count { return &cs.BytesReadUsefulIntendedData }))
-       }
-       for _, f := range c.t.cl.config.Callbacks.ReceivedUsefulData {
-               f(ReceivedUsefulDataEvent{c, msg})
-       }
-       c.lastUsefulChunkReceived = time.Now()
-
-       // Need to record that it hasn't been written yet, before we attempt to do
-       // anything with it.
-       piece.incrementPendingWrites()
-       // Record that we have the chunk, so we aren't trying to download it while
-       // waiting for it to be written to storage.
-       piece.unpendChunkIndex(chunkIndexFromChunkSpec(ppReq.ChunkSpec, t.chunkSize))
-
-       // Cancel pending requests for this chunk from *other* peers.
-       t.iterPeers(func(p *Peer) {
-               if p == c {
-                       return
-               }
-               p.cancel(req)
-       })
-
-       err := func() error {
-               cl.unlock()
-               defer cl.lock()
-               concurrentChunkWrites.Add(1)
-               defer concurrentChunkWrites.Add(-1)
-               // Write the chunk out. Note that the upper bound on chunk writing concurrency will be the
-               // number of connections. We write inline with receiving the chunk (with this lock dance),
-               // because we want to handle errors synchronously and I haven't thought of a nice way to
-               // defer any concurrency to the storage and have that notify the client of errors. TODO: Do
-               // that instead.
-               return t.writeChunk(int(msg.Index), int64(msg.Begin), msg.Piece)
-       }()
-
-       piece.decrementPendingWrites()
-
-       if err != nil {
-               c.logger.WithDefaultLevel(log.Error).Printf("writing received chunk %v: %v", req, err)
-               t.pendRequest(req)
-               // Necessary to pass TestReceiveChunkStorageFailureSeederFastExtensionDisabled. I think a
-               // request update runs while we're writing the chunk that just failed. Then we never do a
-               // fresh update after pending the failed request.
-               c.updateRequests("Peer.receiveChunk error writing chunk")
-               t.onWriteChunkErr(err)
-               return nil
-       }
-
-       c.onDirtiedPiece(pieceIndex(ppReq.Index))
-
-       // We need to ensure the piece is only queued once, so only the last chunk writer gets this job.
-       if t.pieceAllDirty(pieceIndex(ppReq.Index)) && piece.pendingWrites == 0 {
-               t.queuePieceCheck(pieceIndex(ppReq.Index))
-               // We don't pend all chunks here anymore because we don't want code dependent on the dirty
-               // chunk status (such as the haveChunk call above) to have to check all the various other
-               // piece states like queued for hash, hashing, etc. This does mean that we need to be sure
-               // that the piece's chunks are pended again at an appropriate time later, however.
-       }
-
-       cl.event.Broadcast()
-       // We do this because we've written a chunk, and may change PieceState.Partial.
-       t.publishPieceChange(pieceIndex(ppReq.Index))
-
-       return nil
-}
-
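
The unlock/relock dance around t.writeChunk above is worth spelling out. Below is a minimal,
hypothetical sketch of the same pattern (writeWithLockDance, pendingWrites and the signature are
illustrative, not this package's API): bookkeeping happens under the client lock, while the
blocking storage write runs with the lock released so other connections can make progress.

package main

import (
	"fmt"
	"sync"
)

// writeWithLockDance models the pattern above: the caller holds mu, we
// record the in-flight write, drop the lock for the blocking I/O, then
// reacquire it to finish the bookkeeping.
func writeWithLockDance(mu *sync.Mutex, pendingWrites *int, write func() error) error {
	*pendingWrites++ // recorded while still locked, cf. incrementPendingWrites
	mu.Unlock()
	err := write() // the storage write happens without the client lock held
	mu.Lock()
	*pendingWrites--
	return err
}

func main() {
	var mu sync.Mutex
	pending := 0
	mu.Lock()
	err := writeWithLockDance(&mu, &pending, func() error { return nil })
	mu.Unlock()
	fmt.Println(err, pending) // <nil> 0
}
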
-func (c *Peer) onDirtiedPiece(piece pieceIndex) {
-       if c.peerTouchedPieces == nil {
-               c.peerTouchedPieces = make(map[pieceIndex]struct{})
-       }
-       c.peerTouchedPieces[piece] = struct{}{}
-       ds := &c.t.pieces[piece].dirtiers
-       if *ds == nil {
-               *ds = make(map[*Peer]struct{})
-       }
-       (*ds)[c] = struct{}{}
-}
-
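
onDirtiedPiece leans on a common Go idiom: reads from a nil map are safe, but writes panic, so
allocation is deferred to the first write. A self-contained sketch (the tracker type here is
invented for illustration):

package main

import "fmt"

type tracker struct {
	touched map[int]struct{}
}

// mark allocates the map lazily: reading a nil map just misses, but
// assigning into one panics, so allocation happens on first write only.
func (t *tracker) mark(piece int) {
	if t.touched == nil {
		t.touched = make(map[int]struct{})
	}
	t.touched[piece] = struct{}{}
}

func main() {
	var t tracker
	_, ok := t.touched[3] // safe read from a nil map
	t.mark(3)
	fmt.Println(ok, len(t.touched)) // false 1
}
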
 func (c *PeerConn) uploadAllowed() bool {
        if c.t.cl.config.NoUpload {
                return false
@@ -1505,46 +1008,8 @@ func (cn *PeerConn) drop() {
        cn.t.dropConnection(cn)
 }
 
-func (cn *Peer) netGoodPiecesDirtied() int64 {
-       return cn._stats.PiecesDirtiedGood.Int64() - cn._stats.PiecesDirtiedBad.Int64()
-}
-
-func (c *Peer) peerHasWantedPieces() bool {
-       if c.peerSentHaveAll {
-               return !c.t.haveAllPieces()
-       }
-       if !c.t.haveInfo() {
-               return !c._peerPieces.IsEmpty()
-       }
-       return c._peerPieces.Intersects(&c.t._pendingPieces)
-}
-
-func (c *Peer) deleteRequest(r RequestIndex) bool {
-       if !c.actualRequestState.Requests.CheckedRemove(r) {
-               return false
-       }
-       c.cancelledRequests.Remove(r)
-       for _, f := range c.callbacks.DeletedRequest {
-               f(PeerRequestEvent{c, c.t.requestIndexToRequest(r)})
-       }
-       c.updateExpectingChunks()
-       if !c.peerChoking || c.peerAllowedFast.Contains(r/c.t.chunksPerRegularPiece()) {
-               c.t.pendingRequests.Dec(r)
-       }
-       return true
-}
-
-func (c *Peer) deleteAllRequests() {
-       c.actualRequestState.Requests.Clone().Iterate(func(x uint32) bool {
-               c.deleteRequest(x)
-               return true
-       })
-       if !c.actualRequestState.Requests.IsEmpty() {
-               panic(c.actualRequestState.Requests.GetCardinality())
-       }
-       // for c := range c.t.conns {
-       //      c.tickleWriter()
-       // }
+func (cn *PeerConn) ban() {
+       cn.t.cl.banPeerIP(cn.remoteIp())
 }
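
Two roaring.Bitmap patterns in the deleted helpers above are easy to get wrong. deleteAllRequests
iterates a Clone() because deleteRequest mutates the bitmap being walked, and peerHasWantedPieces
uses Intersects to test for overlap without materializing the intersection. A standalone sketch:

package main

import (
	"fmt"

	"github.com/RoaringBitmap/roaring"
)

func main() {
	reqs := roaring.New()
	reqs.AddRange(0, 8) // request indices 0..7

	// Mutating a bitmap while iterating it is unsafe, hence Clone():
	// walk the snapshot, delete from the original.
	reqs.Clone().Iterate(func(x uint32) bool {
		reqs.Remove(x)
		return true // keep iterating
	})
	fmt.Println(reqs.IsEmpty()) // true

	// Intersects answers "any overlap?" without building a result
	// bitmap, which is all peerHasWantedPieces needs.
	have, pending := roaring.New(), roaring.New()
	have.Add(1)
	pending.AddRange(0, 3)
	fmt.Println(have.Intersects(pending)) // true
}
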
 
 // This is called when something has changed that should wake the writer, such as putting stuff into
@@ -1555,6 +1020,7 @@ func (c *PeerConn) tickleWriter() {
 
 func (c *PeerConn) sendChunk(r Request, msg func(pp.Message) bool, state *peerRequestState) (more bool) {
        c.lastChunkSent = time.Now()
+       state.allocReservation.Release()
        return msg(pp.Message{
                Type:  pp.Piece,
                Index: r.Index,
@@ -1572,20 +1038,6 @@ func (c *PeerConn) setTorrent(t *Torrent) {
        t.reconcileHandshakeStats(c)
 }
 
-func (c *Peer) peerPriority() (peerPriority, error) {
-       return bep40Priority(c.remoteIpPort(), c.t.cl.publicAddr(c.remoteIp()))
-}
-
-func (c *Peer) remoteIp() net.IP {
-       host, _, _ := net.SplitHostPort(c.RemoteAddr.String())
-       return net.ParseIP(host)
-}
-
-func (c *Peer) remoteIpPort() IpPort {
-       ipa, _ := tryIpPortFromNetAddr(c.RemoteAddr)
-       return IpPort{ipa.IP, uint16(ipa.Port)}
-}
-
 func (c *PeerConn) pexPeerFlags() pp.PexPeerFlags {
        f := pp.PexPeerFlags(0)
        if c.PeerPrefersEncryption {
@@ -1603,42 +1055,35 @@ func (c *PeerConn) pexPeerFlags() pp.PexPeerFlags {
 // This returns the address to use if we want to dial the peer again. It incorporates the peer's
 // advertised listen port.
 func (c *PeerConn) dialAddr() PeerRemoteAddr {
-       if !c.outgoing && c.PeerListenPort != 0 {
-               switch addr := c.RemoteAddr.(type) {
-               case *net.TCPAddr:
-                       dialAddr := *addr
-                       dialAddr.Port = c.PeerListenPort
-                       return &dialAddr
-               case *net.UDPAddr:
-                       dialAddr := *addr
-                       dialAddr.Port = c.PeerListenPort
-                       return &dialAddr
-               }
+       if c.outgoing || c.PeerListenPort == 0 {
+               return c.RemoteAddr
+       }
+       addrPort, err := addrPortFromPeerRemoteAddr(c.RemoteAddr)
+       if err != nil {
+               c.logger.Levelf(
+                       log.Warning,
+                       "error parsing %q for alternate dial port: %v",
+                       c.RemoteAddr,
+                       err,
+               )
+               return c.RemoteAddr
        }
-       return c.RemoteAddr
+       return netip.AddrPortFrom(addrPort.Addr(), uint16(c.PeerListenPort))
 }
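
The rewritten dialAddr keeps the observed address and swaps in the peer's advertised listen port.
addrPortFromPeerRemoteAddr is internal to this package; a rough standalone equivalent of the happy
path using only net/netip (the helper name is invented) looks like this:

package main

import (
	"fmt"
	"net/netip"
)

// substituteListenPort mirrors dialAddr's happy path: parse the remote
// address, keep the IP, replace the port with the advertised one.
func substituteListenPort(remote string, listenPort uint16) (netip.AddrPort, error) {
	ap, err := netip.ParseAddrPort(remote)
	if err != nil {
		return netip.AddrPort{}, err
	}
	return netip.AddrPortFrom(ap.Addr(), listenPort), nil
}

func main() {
	ap, err := substituteListenPort("203.0.113.7:51413", 6881)
	fmt.Println(ap, err) // 203.0.113.7:6881 <nil>
}
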
 
-func (c *PeerConn) pexEvent(t pexEventType) pexEvent {
+func (c *PeerConn) pexEvent(t pexEventType) (_ pexEvent, err error) {
        f := c.pexPeerFlags()
-       addr := c.dialAddr()
-       return pexEvent{t, addr, f}
+       dialAddr := c.dialAddr()
+       addr, err := addrPortFromPeerRemoteAddr(dialAddr)
+       if err != nil || !addr.IsValid() {
+               err = fmt.Errorf("parsing dial addr %q: %w", dialAddr, err)
+               return
+       }
+       return pexEvent{t, addr, f, nil}, nil
 }
 
 func (c *PeerConn) String() string {
-       return fmt.Sprintf("connection %p", c)
-}
-
-func (c *Peer) trust() connectionTrust {
-       return connectionTrust{c.trusted, c.netGoodPiecesDirtied()}
-}
-
-type connectionTrust struct {
-       Implicit             bool
-       NetGoodPiecesDirtied int64
-}
-
-func (l connectionTrust) Less(r connectionTrust) bool {
-       return multiless.New().Bool(l.Implicit, r.Implicit).Int64(l.NetGoodPiecesDirtied, r.NetGoodPiecesDirtied).Less()
+       return fmt.Sprintf("%T %p [id=%+q, exts=%v, v=%q]", c, c, c.PeerID, c.PeerExtensionBytes, c.PeerClientName.Load())
 }
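
The new String format uses %+q, which escapes non-ASCII to \u sequences so identifiers supplied by
arbitrary peers (the PeerID, the client name) keep log lines 7-bit clean. A quick demonstration:

package main

import "fmt"

func main() {
	name := "µTorrent"
	fmt.Printf("%q\n", name)  // "µTorrent"
	fmt.Printf("%+q\n", name) // "\u00b5Torrent"
}
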
 
 // Returns the pieces the peer could have based on their claims. If we don't know how many pieces
@@ -1649,25 +1094,38 @@ func (cn *PeerConn) PeerPieces() *roaring.Bitmap {
        return cn.newPeerPieces()
 }
 
-// Returns a new Bitmap that includes bits for all pieces the peer could have based on their claims.
-func (cn *Peer) newPeerPieces() *roaring.Bitmap {
-       // TODO: Can we use copy on write?
-       ret := cn._peerPieces.Clone()
-       if cn.peerSentHaveAll {
-               if cn.t.haveInfo() {
-                       ret.AddRange(0, bitmap.BitRange(cn.t.numPieces()))
-               } else {
-                       ret.AddRange(0, bitmap.ToEnd)
-               }
-       }
-       return ret
+func (pc *PeerConn) remoteIsTransmission() bool {
+       return bytes.HasPrefix(pc.PeerID[:], []byte("-TR")) && pc.PeerID[7] == '-'
+}
+
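
remoteIsTransmission relies on the Azureus-style peer ID convention (BEP 20): a '-', a
two-character client code, four version characters, another '-', then random bytes, which is why it
checks PeerID[7]. A sketch of a generic decoder for that layout (not part of this package):

package main

import "fmt"

// azureusClient extracts the client code and version field from an
// Azureus-style peer ID ("-XX1234-..." per BEP 20). ok is false for IDs
// that don't follow the convention.
func azureusClient(id [20]byte) (client, version string, ok bool) {
	if id[0] != '-' || id[7] != '-' {
		return "", "", false
	}
	return string(id[1:3]), string(id[3:7]), true
}

func main() {
	var id [20]byte
	copy(id[:], "-TR4050-abcdefghijkl") // e.g. a Transmission 4.x peer
	fmt.Println(azureusClient(id))      // TR 4050 true
}
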
+func (pc *PeerConn) remoteDialAddrPort() (netip.AddrPort, error) {
+       dialAddr := pc.dialAddr()
+       return addrPortFromPeerRemoteAddr(dialAddr)
 }
 
-func (cn *Peer) stats() *ConnStats {
-       return &cn._stats
+func (pc *PeerConn) bitExtensionEnabled(bit pp.ExtensionBit) bool {
+       return pc.t.cl.config.Extensions.GetBit(bit) && pc.PeerExtensionBytes.GetBit(bit)
+}
+
+func (cn *PeerConn) peerPiecesChanged() {
+       cn.t.maybeDropMutuallyCompletePeer(cn)
 }
 
-func (p *Peer) TryAsPeerConn() (*PeerConn, bool) {
-       pc, ok := p.peerImpl.(*PeerConn)
-       return pc, ok
+// Returns whether the connection could be useful to us. For example, we're
+// seeding and they want data, or we don't have the metainfo and they can
+// provide it.
+func (c *PeerConn) useful() bool {
+       t := c.t
+       if c.closed.IsSet() {
+               return false
+       }
+       if !t.haveInfo() {
+               return c.supportsExtension("ut_metadata")
+       }
+       if t.seeding() && c.peerInterested {
+               return true
+       }
+       if c.peerHasWantedPieces() {
+               return true
+       }
+       return false
 }