12 "github.com/RoaringBitmap/roaring"
13 "github.com/anacrolix/chansync"
14 . "github.com/anacrolix/generics"
15 "github.com/anacrolix/log"
16 "github.com/anacrolix/missinggo/iter"
17 "github.com/anacrolix/missinggo/v2/bitmap"
18 "github.com/anacrolix/multiless"
20 "github.com/anacrolix/torrent/internal/alloclim"
21 "github.com/anacrolix/torrent/mse"
22 pp "github.com/anacrolix/torrent/peer_protocol"
23 request_strategy "github.com/anacrolix/torrent/request-strategy"
24 typedRoaring "github.com/anacrolix/torrent/typed-roaring"
29 // First to ensure 64-bit alignment for atomics. See #262.
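// (Background: on 32-bit platforms Go only guarantees 64-bit alignment for the first word of an
// allocated struct, so 64-bit atomic fields placed later in the struct could be misaligned;
// keeping these fields first sidesteps that.)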
RemoteAddr PeerRemoteAddr
// The local address as observed by the remote peer. WebRTC seems to get this right without
// needing hints from the Client.
localPublicAddr peerLocalPublicAddr
bannableAddr Option[bannableAddr]
// True if the connection is operating over MSE obfuscation.
cryptoMethod mse.CryptoMethod
closed chansync.SetOnce
// Set true after we've added our ConnStats generated during handshake to
// other ConnStat instances as determined when the *Torrent became known.
reconciledHandshakeStats bool
lastMessageReceived time.Time
completedHandshake time.Time
lastUsefulChunkReceived time.Time
lastChunkSent time.Time

// Stuff controlled by the local peer.
needRequestUpdate string
requestState request_strategy.PeerRequestState
updateRequestsTimer *time.Timer
lastRequestUpdate time.Time
peakRequests maxRequests
lastBecameInterested time.Time
priorInterest time.Duration

lastStartedExpectingToReceiveChunks time.Time
cumulativeExpectedToReceiveChunks time.Duration
_chunksReceivedWhileExpecting int64

piecesReceivedSinceLastRequestUpdate maxRequests
maxPiecesReceivedBetweenRequestUpdates maxRequests
// Chunks that we might reasonably expect to receive from the peer. Due to latency, buffering,
// and implementation differences, we may receive chunks that are no longer in the set of
// requests we actually want. This could use a roaring.BSI if the memory use becomes noticeable.
validReceiveChunks map[RequestIndex]int
// Indexed by metadata piece, set to true if posted and pending a
// response.
metadataRequests []bool
sentHaves bitmap.Bitmap
// Stuff controlled by the remote peer.
peerRequests map[Request]*peerRequestState
PeerPrefersEncryption bool // as indicated by 'e' field in extension handshake
// The highest possible number of pieces the torrent could have based on
// communication with the peer. Generally only useful until we have the
// metainfo.
peerMinPieces pieceIndex
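// (For example, receiving a "have" message for piece index 99 implies the torrent has at least
// 100 pieces, so peerMinPieces would be raised to 100.)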
// Pieces we've accepted chunks for from the peer.
peerTouchedPieces map[pieceIndex]struct{}
peerAllowedFast typedRoaring.Bitmap[pieceIndex]
PeerMaxRequests maxRequests // Maximum pending requests the peer allows.

peerRequestState struct {
allocReservation *alloclim.Reservation

PeerRemoteAddr interface {

peerRequests = orderedBitmap[RequestIndex]
PeerSourceUtHolepunch = "C"
PeerSourceTracker = "Tr"
PeerSourceIncoming = "I"
PeerSourceDhtGetPeers = "Hg" // Peers we found by searching a DHT.
PeerSourceDhtAnnouncePeer = "Ha" // Peers that were announced to us by a DHT.
// The peer was given directly, such as through a magnet link.
PeerSourceDirect = "M"
// Returns the Torrent a Peer belongs to. Shouldn't change for the lifetime of the Peer. May be nil
// if we are the receiving end of a connection and the handshake hasn't been received or accepted
// yet.
func (p *Peer) Torrent() *Torrent {

func (p *Peer) initRequestState() {
p.requestState.Requests = &peerRequests{}
func (cn *Peer) updateExpectingChunks() {
if cn.expectingChunks() {
if cn.lastStartedExpectingToReceiveChunks.IsZero() {
cn.lastStartedExpectingToReceiveChunks = time.Now()
if !cn.lastStartedExpectingToReceiveChunks.IsZero() {
cn.cumulativeExpectedToReceiveChunks += time.Since(cn.lastStartedExpectingToReceiveChunks)
cn.lastStartedExpectingToReceiveChunks = time.Time{}

func (cn *Peer) expectingChunks() bool {
if cn.requestState.Requests.IsEmpty() {
if !cn.requestState.Interested {
haveAllowedFastRequests := false
cn.peerAllowedFast.Iterate(func(i pieceIndex) bool {
haveAllowedFastRequests = roaringBitmapRangeCardinality[RequestIndex](
cn.requestState.Requests,
cn.t.pieceRequestIndexOffset(i),
cn.t.pieceRequestIndexOffset(i+1),
return !haveAllowedFastRequests
return haveAllowedFastRequests
func (cn *Peer) remoteChokingPiece(piece pieceIndex) bool {
return cn.peerChoking && !cn.peerAllowedFast.Contains(piece)
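// (Per the BEP 6 fast extension, pieces the peer has advertised as allowed-fast may still be
// requested while it is choking us, hence the allowed-fast check alongside peerChoking above.)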
func (cn *Peer) cumInterest() time.Duration {
ret := cn.priorInterest
if cn.requestState.Interested {
ret += time.Since(cn.lastBecameInterested)

func (cn *Peer) locker() *lockWithDeferreds {
return cn.t.cl.locker()

func (cn *PeerConn) supportsExtension(ext pp.ExtensionName) bool {
_, ok := cn.PeerExtensionIDs[ext]

// The best guess at number of pieces in the torrent for this peer.
func (cn *Peer) bestPeerNumPieces() pieceIndex {
return cn.t.numPieces()
return cn.peerMinPieces

func (cn *Peer) completedString() string {
have := pieceIndex(cn.peerPieces().GetCardinality())
if all, _ := cn.peerHasAllPieces(); all {
have = cn.bestPeerNumPieces()
return fmt.Sprintf("%d/%d", have, cn.bestPeerNumPieces())

func eventAgeString(t time.Time) string {
return fmt.Sprintf("%.2fs ago", time.Since(t).Seconds())
// Inspired by https://github.com/transmission/transmission/wiki/Peer-Status-Text.
func (cn *Peer) statusFlags() (ret string) {
ret += string([]byte{b})
if cn.requestState.Interested {
ret += cn.connectionFlags()
if cn.peerInterested {

func (cn *Peer) StatusFlags() string {
return cn.statusFlags()
func (cn *Peer) downloadRate() float64 {
num := cn._stats.BytesReadUsefulData.Int64()
return float64(num) / cn.totalExpectingTime().Seconds()

func (p *Peer) DownloadRate() float64 {
defer p.locker().RUnlock()
return p.downloadRate()

func (cn *Peer) UploadRate() float64 {
defer cn.locker().RUnlock()
num := cn._stats.BytesWrittenData.Int64()
return float64(num) / time.Now().Sub(cn.completedHandshake).Seconds()
func (cn *Peer) iterContiguousPieceRequests(f func(piece pieceIndex, count int)) {
var last Option[pieceIndex]
next := func(item Option[pieceIndex]) {
cn.requestState.Requests.Iterate(func(requestIndex request_strategy.RequestIndex) bool {
next(Some(cn.t.pieceIndexOfRequestIndex(requestIndex)))
next(None[pieceIndex]())
func (cn *Peer) writeStatus(w io.Writer) {
// \t isn't preserved in <pre> blocks?
if cn.closed.IsSet() {
fmt.Fprint(w, "CLOSED: ")
fmt.Fprintln(w, strings.Join(cn.peerImplStatusLines(), "\n"))
prio, err := cn.peerPriority()
prioStr := fmt.Sprintf("%08x", prio)
prioStr += ": " + err.Error()
fmt.Fprintf(w, "bep40-prio: %v\n", prioStr)
fmt.Fprintf(w, "last msg: %s, connected: %s, last helpful: %s, itime: %s, etime: %s\n",
eventAgeString(cn.lastMessageReceived),
eventAgeString(cn.completedHandshake),
eventAgeString(cn.lastHelpful()),
cn.totalExpectingTime(),
"%s completed, %d pieces touched, good chunks: %v/%v:%v reqq: %d+%v/(%d/%d):%d/%d, flags: %s, dr: %.1f KiB/s\n",
cn.completedString(),
len(cn.peerTouchedPieces),
&cn._stats.ChunksReadUseful,
&cn._stats.ChunksRead,
&cn._stats.ChunksWritten,
cn.requestState.Requests.GetCardinality(),
cn.requestState.Cancelled.GetCardinality(),
cn.nominalMaxRequests(),
len(cn.peerRequests),
cn.downloadRate()/(1<<10),
fmt.Fprintf(w, "requested pieces:")
cn.iterContiguousPieceRequests(func(piece pieceIndex, count int) {
fmt.Fprintf(w, " %v(%v)", piece, count)
func (p *Peer) close() {
if p.updateRequestsTimer != nil {
p.updateRequestsTimer.Stop()
for _, prs := range p.peerRequests {
prs.allocReservation.Drop()
p.t.decPeerPieceAvailability(p)
for _, f := range p.callbacks.PeerClosed {

func (p *Peer) Close() error {
defer p.locker().Unlock()
// Peer definitely has a piece, for purposes of requesting. So it's not sufficient that we think
// they do (known=true).
func (cn *Peer) peerHasPiece(piece pieceIndex) bool {
if all, known := cn.peerHasAllPieces(); all && known {
return cn.peerPieces().ContainsInt(piece)
// 64KiB, but temporarily less to work around an issue with WebRTC. TODO: Update when
// https://github.com/pion/datachannel/issues/59 is fixed.
writeBufferHighWaterLen = 1 << 15
writeBufferLowWaterLen = writeBufferHighWaterLen / 2

interestedMsgLen = len(pp.Message{Type: pp.Interested}.MustMarshalBinary())
requestMsgLen = len(pp.Message{Type: pp.Request}.MustMarshalBinary())
// This is the maximum request count that could fit in the write buffer if it's at or below the
// low water mark when we run maybeUpdateActualRequestState.
maxLocalToRemoteRequests = (writeBufferHighWaterLen - writeBufferLowWaterLen - interestedMsgLen) / requestMsgLen
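// (As a rough illustration, assuming the usual wire encoding of 5 bytes for an Interested
// message and 17 bytes for a Request, this works out to (1<<15 - 1<<14 - 5) / 17 ≈ 963.)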
// The actual value to use as the maximum outbound requests.
func (cn *Peer) nominalMaxRequests() maxRequests {
return maxInt(1, minInt(cn.PeerMaxRequests, cn.peakRequests*2, maxLocalToRemoteRequests))
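// (For example, a peer advertising reqq = 250 with a recent peak of 64 in-flight requests would
// be capped at min(250, 128, maxLocalToRemoteRequests) = 128, assuming the write-buffer bound is
// the largest of the three.)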
func (cn *Peer) totalExpectingTime() (ret time.Duration) {
ret = cn.cumulativeExpectedToReceiveChunks
if !cn.lastStartedExpectingToReceiveChunks.IsZero() {
ret += time.Since(cn.lastStartedExpectingToReceiveChunks)

func (cn *Peer) setInterested(interested bool) bool {
if cn.requestState.Interested == interested {
cn.requestState.Interested = interested
cn.lastBecameInterested = time.Now()
} else if !cn.lastBecameInterested.IsZero() {
cn.priorInterest += time.Since(cn.lastBecameInterested)
cn.updateExpectingChunks()
// log.Printf("%p: setting interest: %v", cn, interested)
return cn.writeInterested(interested)
// The function takes a message to be sent, and returns true if more messages are okay.
type messageWriter func(pp.Message) bool

// This function seems to only be used by Peer.request. It's all logic checks, so maybe we can
// no-op it when we want to go fast.
func (cn *Peer) shouldRequest(r RequestIndex) error {
err := cn.t.checkValidReceiveChunk(cn.t.requestIndexToRequest(r))
pi := cn.t.pieceIndexOfRequestIndex(r)
if cn.requestState.Cancelled.Contains(r) {
return errors.New("request is cancelled and waiting acknowledgement")
if !cn.peerHasPiece(pi) {
return errors.New("requesting piece peer doesn't have")
if !cn.t.peerIsActive(cn) {
panic("requesting but not in active conns")
if cn.closed.IsSet() {
panic("requesting when connection is closed")
if cn.t.hashingPiece(pi) {
panic("piece is being hashed")
if cn.t.pieceQueuedForHash(pi) {
panic("piece is queued for hash")
if cn.peerChoking && !cn.peerAllowedFast.Contains(pi) {
// This could occur if we made a request with the fast extension, and then got choked and
// haven't had the request rejected yet.
if !cn.requestState.Requests.Contains(r) {
panic("peer choking and piece not allowed fast")
func (cn *Peer) mustRequest(r RequestIndex) bool {
more, err := cn.request(r)

func (cn *Peer) request(r RequestIndex) (more bool, err error) {
if err := cn.shouldRequest(r); err != nil {
if cn.requestState.Requests.Contains(r) {
if maxRequests(cn.requestState.Requests.GetCardinality()) >= cn.nominalMaxRequests() {
return true, errors.New("too many outstanding requests")
cn.requestState.Requests.Add(r)
if cn.validReceiveChunks == nil {
cn.validReceiveChunks = make(map[RequestIndex]int)
cn.validReceiveChunks[r]++
cn.t.requestState[r] = requestState{
cn.updateExpectingChunks()
ppReq := cn.t.requestIndexToRequest(r)
for _, f := range cn.callbacks.SentRequest {
f(PeerRequestEvent{cn, ppReq})
return cn.peerImpl._request(ppReq), nil
func (me *Peer) cancel(r RequestIndex) {
if !me.deleteRequest(r) {
panic("request not existing should have been guarded")
// Record that we expect to get a cancel ack.
if !me.requestState.Cancelled.CheckedAdd(r) {
panic("request already cancelled")
if me.isLowOnRequests() {
me.updateRequests("Peer.cancel")
// Sets a reason to update requests, and if there wasn't already one, handles it.
func (cn *Peer) updateRequests(reason string) {
if cn.needRequestUpdate != "" {
cn.needRequestUpdate = reason
cn.handleUpdateRequests()
// Emits the indices in the Bitmaps bms in order, never repeating any index.
// skip is mutated during execution, and its initial values will never be
// emitted.
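// (For example, with bms = {1, 2, 3} and {3, 4} and skip initially holding 2, the emitted
// sequence would be 1, 3, 4: 2 is skipped up front and 3 is not repeated.)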
func iterBitmapsDistinct(skip *bitmap.Bitmap, bms ...bitmap.Bitmap) iter.Func {
return func(cb iter.Callback) {
for _, bm := range bms {
func(_i interface{}) bool {
if skip.Contains(bitmap.BitIndex(i)) {
skip.Add(bitmap.BitIndex(i))
// After handshake, we know what Torrent and Client stats to include for a
// connection.
func (cn *Peer) postHandshakeStats(f func(*ConnStats)) {

// All ConnStats that include this connection. Some objects are not known
// until the handshake is complete, after which it's expected to reconcile the
// differences.
func (cn *Peer) allStats(f func(*ConnStats)) {
if cn.reconciledHandshakeStats {
cn.postHandshakeStats(f)

func (cn *Peer) Stats() *ConnStats {

func (cn *Peer) CompletedString() string {
return cn.completedString()

func (cn *Peer) readBytes(n int64) {
cn.allStats(add(n, func(cs *ConnStats) *Count { return &cs.BytesRead }))

func (c *Peer) lastHelpful() (ret time.Time) {
ret = c.lastUsefulChunkReceived
if c.t.seeding() && c.lastChunkSent.After(ret) {
ret = c.lastChunkSent
// Returns whether any part of the chunk would lie outside a piece of the given length.
func chunkOverflowsPiece(cs ChunkSpec, pieceLength pp.Integer) bool {
case cs.Begin+cs.Length > pieceLength:
// Check for integer overflow
case cs.Begin > pp.IntegerMax-cs.Length:
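// (For instance, with pieceLength = 16384 a chunk {Begin: 16000, Length: 1024} reaches past the
// end of the piece; the second case catches Begin values so large that Begin+Length would wrap.)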
func runSafeExtraneous(f func()) {

// Returns true if it was valid to reject the request.
func (c *Peer) remoteRejectedRequest(r RequestIndex) bool {
if c.deleteRequest(r) {
} else if !c.requestState.Cancelled.CheckedRemove(r) {
if c.isLowOnRequests() {
c.updateRequests("Peer.remoteRejectedRequest")
c.decExpectedChunkReceive(r)

func (c *Peer) decExpectedChunkReceive(r RequestIndex) {
count := c.validReceiveChunks[r]
delete(c.validReceiveChunks, r)
} else if count > 1 {
c.validReceiveChunks[r] = count - 1
func (c *Peer) doChunkReadStats(size int64) {
c.allStats(func(cs *ConnStats) { cs.receivedChunk(size) })

// Handle a received chunk from a peer.
func (c *Peer) receiveChunk(msg *pp.Message) error {
chunksReceived.Add("total", 1)
ppReq := newRequestFromMessage(msg)
err := t.checkValidReceiveChunk(ppReq)
err = log.WithLevel(log.Warning, err)
req := c.t.requestIndexFromRequest(ppReq)
recordBlockForSmartBan := sync.OnceFunc(func() {
c.recordBlockForSmartBan(req, msg.Piece)
// This needs to occur before we return, but we try to do it when the client is unlocked. It
// can't be done before checking if chunks are valid because they won't be deallocated by piece
// hashing if they're out of bounds.
defer recordBlockForSmartBan()
chunksReceived.Add("while choked", 1)
if c.validReceiveChunks[req] <= 0 {
chunksReceived.Add("unexpected", 1)
return errors.New("received unexpected chunk")
c.decExpectedChunkReceive(req)
if c.peerChoking && c.peerAllowedFast.Contains(pieceIndex(ppReq.Index)) {
chunksReceived.Add("due to allowed fast", 1)
// The request needs to be deleted immediately to prevent cancels occurring asynchronously when
// we have actually already received the piece, while we have the Client unlocked to write the
// data out.
if c.requestState.Requests.Contains(req) {
for _, f := range c.callbacks.ReceivedRequested {
f(PeerMessageEvent{c, msg})
// Request has been satisfied.
if c.deleteRequest(req) || c.requestState.Cancelled.CheckedRemove(req) {
c._chunksReceivedWhileExpecting++
if c.isLowOnRequests() {
c.updateRequests("Peer.receiveChunk deleted request")
chunksReceived.Add("unintended", 1)
// Do we actually want this chunk?
if t.haveChunk(ppReq) {
// panic(fmt.Sprintf("%+v", ppReq))
chunksReceived.Add("redundant", 1)
c.allStats(add(1, func(cs *ConnStats) *Count { return &cs.ChunksReadWasted }))
piece := &t.pieces[ppReq.Index]
c.allStats(add(1, func(cs *ConnStats) *Count { return &cs.ChunksReadUseful }))
c.allStats(add(int64(len(msg.Piece)), func(cs *ConnStats) *Count { return &cs.BytesReadUsefulData }))
c.piecesReceivedSinceLastRequestUpdate++
c.allStats(add(int64(len(msg.Piece)), func(cs *ConnStats) *Count { return &cs.BytesReadUsefulIntendedData }))
for _, f := range c.t.cl.config.Callbacks.ReceivedUsefulData {
f(ReceivedUsefulDataEvent{c, msg})
c.lastUsefulChunkReceived = time.Now()
// Need to record that it hasn't been written yet, before we attempt to do
// anything with it.
piece.incrementPendingWrites()
// Record that we have the chunk, so we aren't trying to download it while
// waiting for it to be written to storage.
piece.unpendChunkIndex(chunkIndexFromChunkSpec(ppReq.ChunkSpec, t.chunkSize))
// Cancel pending requests for this chunk from *other* peers.
if p := t.requestingPeer(req); p != nil {
panic("should not be pending request from conn that just received it")
// Opportunistically do this here while we aren't holding the client lock.
recordBlockForSmartBan()
concurrentChunkWrites.Add(1)
defer concurrentChunkWrites.Add(-1)
// Write the chunk out. Note that the upper bound on chunk writing concurrency will be the
// number of connections. We write inline with receiving the chunk (with this lock dance),
// because we want to handle errors synchronously and I haven't thought of a nice way to
// defer any concurrency to the storage and have that notify the client of errors. TODO: Do
// that instead.
return t.writeChunk(int(msg.Index), int64(msg.Begin), msg.Piece)
piece.decrementPendingWrites()
c.logger.WithDefaultLevel(log.Error).Printf("writing received chunk %v: %v", req, err)
// Necessary to pass TestReceiveChunkStorageFailureSeederFastExtensionDisabled. I think a
// request update runs while we're writing the chunk that just failed. Then we never do a
// fresh update after pending the failed request.
c.updateRequests("Peer.receiveChunk error writing chunk")
t.onWriteChunkErr(err)
c.onDirtiedPiece(pieceIndex(ppReq.Index))
// We need to ensure the piece is only queued once, so only the last chunk writer gets this job.
if t.pieceAllDirty(pieceIndex(ppReq.Index)) && piece.pendingWrites == 0 {
t.queuePieceCheck(pieceIndex(ppReq.Index))
// We don't pend all chunks here anymore because we don't want code dependent on the dirty
// chunk status (such as the haveChunk call above) to have to check all the various other
// piece states like queued for hash, hashing etc. This does mean that we need to be sure
// that chunk pieces are pended at an appropriate time later however.
// We do this because we've written a chunk, and may change PieceState.Partial.
t.publishPieceStateChange(pieceIndex(ppReq.Index))

func (c *Peer) onDirtiedPiece(piece pieceIndex) {
if c.peerTouchedPieces == nil {
c.peerTouchedPieces = make(map[pieceIndex]struct{})
c.peerTouchedPieces[piece] = struct{}{}
ds := &c.t.pieces[piece].dirtiers
*ds = make(map[*Peer]struct{})
(*ds)[c] = struct{}{}

func (cn *Peer) netGoodPiecesDirtied() int64 {
return cn._stats.PiecesDirtiedGood.Int64() - cn._stats.PiecesDirtiedBad.Int64()

func (c *Peer) peerHasWantedPieces() bool {
if all, _ := c.peerHasAllPieces(); all {
return !c.t.haveAllPieces() && !c.t._pendingPieces.IsEmpty()
return !c.peerPieces().IsEmpty()
return c.peerPieces().Intersects(&c.t._pendingPieces)
// Returns true if an outstanding request is removed. Cancelled requests should be handled
// separately.
func (c *Peer) deleteRequest(r RequestIndex) bool {
if !c.requestState.Requests.CheckedRemove(r) {
for _, f := range c.callbacks.DeletedRequest {
f(PeerRequestEvent{c, c.t.requestIndexToRequest(r)})
c.updateExpectingChunks()
if c.t.requestingPeer(r) != c {
panic("only one peer should have a given request at a time")
delete(c.t.requestState, r)
// c.t.iterPeers(func(p *Peer) {
// if p.isLowOnRequests() {
// p.updateRequests("Peer.deleteRequest")

func (c *Peer) deleteAllRequests(reason string) {
if c.requestState.Requests.IsEmpty() {
c.requestState.Requests.IterateSnapshot(func(x RequestIndex) bool {
if !c.deleteRequest(x) {
panic("request should exist")
c.t.iterPeers(func(p *Peer) {
if p.isLowOnRequests() {
p.updateRequests(reason)

func (c *Peer) assertNoRequests() {
if !c.requestState.Requests.IsEmpty() {
panic(c.requestState.Requests.GetCardinality())

func (c *Peer) cancelAllRequests() {
c.requestState.Requests.IterateSnapshot(func(x RequestIndex) bool {

func (c *Peer) peerPriority() (peerPriority, error) {
return bep40Priority(c.remoteIpPort(), c.localPublicAddr)
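// (BEP 40 canonical peer priority: a value derived symmetrically from the two endpoints'
// addresses, so both sides of a connection should arrive at the same priority.)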
func (c *Peer) remoteIp() net.IP {
host, _, _ := net.SplitHostPort(c.RemoteAddr.String())
return net.ParseIP(host)

func (c *Peer) remoteIpPort() IpPort {
ipa, _ := tryIpPortFromNetAddr(c.RemoteAddr)
return IpPort{ipa.IP, uint16(ipa.Port)}

func (c *Peer) trust() connectionTrust {
return connectionTrust{c.trusted, c.netGoodPiecesDirtied()}

type connectionTrust struct {
NetGoodPiecesDirted int64

func (l connectionTrust) Less(r connectionTrust) bool {
return multiless.New().Bool(l.Implicit, r.Implicit).Int64(l.NetGoodPiecesDirted, r.NetGoodPiecesDirted).Less()
// Returns a new Bitmap that includes bits for all pieces the peer could have based on their claims.
func (cn *Peer) newPeerPieces() *roaring.Bitmap {
// TODO: Can we use copy on write?
ret := cn.peerPieces().Clone()
if all, _ := cn.peerHasAllPieces(); all {
ret.AddRange(0, bitmap.BitRange(cn.t.numPieces()))
ret.AddRange(0, bitmap.ToEnd)

func (cn *Peer) stats() *ConnStats {

func (p *Peer) TryAsPeerConn() (*PeerConn, bool) {
pc, ok := p.peerImpl.(*PeerConn)

func (p *Peer) uncancelledRequests() uint64 {
return p.requestState.Requests.GetCardinality()

type peerLocalPublicAddr = IpPort

func (p *Peer) isLowOnRequests() bool {
return p.requestState.Requests.IsEmpty() && p.requestState.Cancelled.IsEmpty()

func (p *Peer) decPeakRequests() {
// // This can occur when peak requests are altered by the update request timer to be lower than
// // the actual number of outstanding requests. Let's let it go negative and see what happens. I
// // wonder what happens if maxRequests is not signed.
// if p.peakRequests < 1 {
// panic(p.peakRequests)

func (p *Peer) recordBlockForSmartBan(req RequestIndex, blockData []byte) {
if p.bannableAddr.Ok {
p.t.smartBanCache.RecordBlock(p.bannableAddr.Value, req, blockData)