package torrent

import (
	"errors"
	"fmt"
	"io"
	"net"
	"strings"
	"sync/atomic"
	"time"

	"github.com/RoaringBitmap/roaring"
	"github.com/anacrolix/chansync"
	. "github.com/anacrolix/generics"
	"github.com/anacrolix/log"
	"github.com/anacrolix/missinggo/iter"
	"github.com/anacrolix/missinggo/v2/bitmap"
	"github.com/anacrolix/multiless"

	"github.com/anacrolix/torrent/internal/alloclim"
	"github.com/anacrolix/torrent/mse"
	pp "github.com/anacrolix/torrent/peer_protocol"
	request_strategy "github.com/anacrolix/torrent/request-strategy"
	"github.com/anacrolix/torrent/typed-roaring"
)

type (
	Peer struct {
		// First to ensure 64-bit alignment for atomics. See #262.
		_stats ConnStats

		t *Torrent

		peerImpl
		callbacks *Callbacks

		outgoing   bool
		Network    string
		RemoteAddr PeerRemoteAddr
		// The local address as observed by the remote peer. WebRTC seems to get this right without needing hints from the
		// config.
		localPublicAddr peerLocalPublicAddr
		bannableAddr    Option[bannableAddr]
		// True if the connection is operating over MSE obfuscation.
		headerEncrypted bool
		cryptoMethod    mse.CryptoMethod
		Discovery       PeerSource
		trusted         bool
		closed          chansync.SetOnce
		// Set true after we've added our ConnStats generated during handshake to
		// other ConnStat instances as determined when the *Torrent became known.
		reconciledHandshakeStats bool

		lastMessageReceived     time.Time
		completedHandshake      time.Time
		lastUsefulChunkReceived time.Time
		lastChunkSent           time.Time

		// Stuff controlled by the local peer.
		needRequestUpdate    string
		requestState         request_strategy.PeerRequestState
		updateRequestsTimer  *time.Timer
		lastRequestUpdate    time.Time
		peakRequests         maxRequests
		lastBecameInterested time.Time
		priorInterest        time.Duration

		lastStartedExpectingToReceiveChunks time.Time
		cumulativeExpectedToReceiveChunks   time.Duration
		_chunksReceivedWhileExpecting       int64

		choking                                bool
		piecesReceivedSinceLastRequestUpdate   maxRequests
		maxPiecesReceivedBetweenRequestUpdates maxRequests
		// Chunks that we might reasonably expect to receive from the peer. Due to latency, buffering,
		// and implementation differences, we may receive chunks that are no longer in the set of
		// requests we actually want. This could use a roaring.BSI if the memory use becomes noticeable.
		validReceiveChunks map[RequestIndex]int
		// Indexed by metadata piece, set to true if posted and pending a
		// response.
		metadataRequests []bool
		sentHaves        bitmap.Bitmap

		// Stuff controlled by the remote peer.
		peerInterested        bool
		peerChoking           bool
		peerRequests          map[Request]*peerRequestState
		PeerPrefersEncryption bool // as indicated by 'e' field in extension handshake
		PeerListenPort        int
		// The highest possible number of pieces the torrent could have based on
		// communication with the peer. Generally only useful until we have the
		// torrent info.
		peerMinPieces pieceIndex
		// Pieces we've accepted chunks for from the peer.
		peerTouchedPieces map[pieceIndex]struct{}
		peerAllowedFast   typedRoaring.Bitmap[pieceIndex]

		PeerMaxRequests  maxRequests // Maximum pending requests the peer allows.
		PeerExtensionIDs map[pp.ExtensionName]pp.ExtensionNumber
		PeerClientName   atomic.Value

		logger log.Logger
	}

	PeerSource string

	peerRequestState struct {
		data             []byte
		allocReservation *alloclim.Reservation
	}

	PeerRemoteAddr interface {
		String() string
	}

	peerRequests = orderedBitmap[RequestIndex]
)

const (
	PeerSourceTracker         = "Tr"
	PeerSourceIncoming        = "I"
	PeerSourceDhtGetPeers     = "Hg" // Peers we found by searching a DHT.
	PeerSourceDhtAnnouncePeer = "Ha" // Peers that were announced to us by a DHT.
	PeerSourcePex             = "X"
	// The peer was given directly, such as through a magnet link.
	PeerSourceDirect = "M"
)

func (p *Peer) initRequestState() {
	p.requestState.Requests = &peerRequests{}
}

func (cn *Peer) updateExpectingChunks() {
	if cn.expectingChunks() {
		if cn.lastStartedExpectingToReceiveChunks.IsZero() {
			cn.lastStartedExpectingToReceiveChunks = time.Now()
		}
	} else {
		if !cn.lastStartedExpectingToReceiveChunks.IsZero() {
			cn.cumulativeExpectedToReceiveChunks += time.Since(cn.lastStartedExpectingToReceiveChunks)
			cn.lastStartedExpectingToReceiveChunks = time.Time{}
		}
	}
}

func (cn *Peer) expectingChunks() bool {
	if cn.requestState.Requests.IsEmpty() {
		return false
	}
	if !cn.requestState.Interested {
		return false
	}
	if !cn.peerChoking {
		return true
	}
	haveAllowedFastRequests := false
	cn.peerAllowedFast.Iterate(func(i pieceIndex) bool {
		haveAllowedFastRequests = roaringBitmapRangeCardinality[RequestIndex](
			cn.requestState.Requests,
			cn.t.pieceRequestIndexOffset(i),
			cn.t.pieceRequestIndexOffset(i+1),
		) == 0
		return !haveAllowedFastRequests
	})
	return haveAllowedFastRequests
}

func (cn *Peer) remoteChokingPiece(piece pieceIndex) bool {
	return cn.peerChoking && !cn.peerAllowedFast.Contains(piece)
}

func (cn *Peer) cumInterest() time.Duration {
	ret := cn.priorInterest
	if cn.requestState.Interested {
		ret += time.Since(cn.lastBecameInterested)
	}
	return ret
}

func (cn *Peer) locker() *lockWithDeferreds {
	return cn.t.cl.locker()
}

func (cn *Peer) supportsExtension(ext pp.ExtensionName) bool {
	_, ok := cn.PeerExtensionIDs[ext]
	return ok
}

// The best guess at number of pieces in the torrent for this peer.
func (cn *Peer) bestPeerNumPieces() pieceIndex {
	if cn.t.haveInfo() {
		return cn.t.numPieces()
	}
	return cn.peerMinPieces
}

func (cn *Peer) completedString() string {
	have := pieceIndex(cn.peerPieces().GetCardinality())
	if all, _ := cn.peerHasAllPieces(); all {
		have = cn.bestPeerNumPieces()
	}
	return fmt.Sprintf("%d/%d", have, cn.bestPeerNumPieces())
}

func eventAgeString(t time.Time) string {
	if t.IsZero() {
		return "never"
	}
	return fmt.Sprintf("%.2fs ago", time.Since(t).Seconds())
}

// Inspired by https://github.com/transmission/transmission/wiki/Peer-Status-Text.
func (cn *Peer) statusFlags() (ret string) {
	c := func(b byte) {
		ret += string([]byte{b})
	}
	if cn.requestState.Interested {
		c('i')
	}
	if cn.choking {
		c('c')
	}
	c('-')
	ret += cn.connectionFlags()
	c('-')
	if cn.peerInterested {
		c('i')
	}
	if cn.peerChoking {
		c('c')
	}
	return
}
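
// Added note (illustrative, not from the original source): the flags read as
// local state, then connectionFlags() (defined elsewhere in the package), then
// remote state. For example, if we are interested and not choking, and the
// peer is choking us, then with a hypothetical connectionFlags() value of "E"
// the result would be "i-E-c".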

func (cn *Peer) downloadRate() float64 {
	num := cn._stats.BytesReadUsefulData.Int64()
	if num == 0 {
		return 0
	}
	return float64(num) / cn.totalExpectingTime().Seconds()
}

func (cn *Peer) DownloadRate() float64 {
	cn.locker().RLock()
	defer cn.locker().RUnlock()

	return cn.downloadRate()
}

func (cn *Peer) iterContiguousPieceRequests(f func(piece pieceIndex, count int)) {
	var last Option[pieceIndex]
	var count int
	next := func(item Option[pieceIndex]) {
		if item == last {
			count++
		} else {
			if count != 0 {
				f(last.Value, count)
			}
			last = item
			count = 1
		}
	}
	cn.requestState.Requests.Iterate(func(requestIndex request_strategy.RequestIndex) bool {
		next(Some(cn.t.pieceIndexOfRequestIndex(requestIndex)))
		return true
	})
	next(None[pieceIndex]())
}
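
// Illustrative trace (not from the original source): if the outstanding
// requests map, in order, to pieces 2, 2, 2, 7, 7, the callback is invoked as
// f(2, 3) and then f(7, 2); the trailing next(None[pieceIndex]()) call flushes
// the final run.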

func (cn *Peer) writeStatus(w io.Writer, t *Torrent) {
	// \t isn't preserved in <pre> blocks?
	if cn.closed.IsSet() {
		fmt.Fprint(w, "CLOSED: ")
	}
	fmt.Fprintln(w, strings.Join(cn.peerImplStatusLines(), "\n"))
	prio, err := cn.peerPriority()
	prioStr := fmt.Sprintf("%08x", prio)
	if err != nil {
		prioStr += ": " + err.Error()
	}
	fmt.Fprintf(w, "bep40-prio: %v\n", prioStr)
	fmt.Fprintf(w, "last msg: %s, connected: %s, last helpful: %s, itime: %s, etime: %s\n",
		eventAgeString(cn.lastMessageReceived),
		eventAgeString(cn.completedHandshake),
		eventAgeString(cn.lastHelpful()),
		cn.cumInterest(),
		cn.totalExpectingTime(),
	)
	fmt.Fprintf(w,
		"%s completed, %d pieces touched, good chunks: %v/%v:%v reqq: %d+%v/(%d/%d):%d/%d, flags: %s, dr: %.1f KiB/s\n",
		cn.completedString(),
		len(cn.peerTouchedPieces),
		&cn._stats.ChunksReadUseful,
		&cn._stats.ChunksRead,
		&cn._stats.ChunksWritten,
		cn.requestState.Requests.GetCardinality(),
		cn.requestState.Cancelled.GetCardinality(),
		cn.nominalMaxRequests(),
		cn.PeerMaxRequests,
		len(cn.peerRequests),
		localClientReqq,
		cn.statusFlags(),
		cn.downloadRate()/(1<<10),
	)
	fmt.Fprintf(w, "requested pieces:")
	cn.iterContiguousPieceRequests(func(piece pieceIndex, count int) {
		fmt.Fprintf(w, " %v(%v)", piece, count)
	})
	fmt.Fprintf(w, "\n")
}

func (p *Peer) close() {
	if !p.closed.Set() {
		return
	}
	if p.updateRequestsTimer != nil {
		p.updateRequestsTimer.Stop()
	}
	p.peerImpl.onClose()
	if p.t != nil {
		p.t.decPeerPieceAvailability(p)
	}
	for _, f := range p.callbacks.PeerClosed {
		f(p)
	}
}

// Peer definitely has a piece, for purposes of requesting. So it's not sufficient that we think
// they do (known=true).
func (cn *Peer) peerHasPiece(piece pieceIndex) bool {
	if all, known := cn.peerHasAllPieces(); all && known {
		return true
	}
	return cn.peerPieces().ContainsInt(piece)
}

// 64KiB, but temporarily less to work around an issue with WebRTC. TODO: Update when
// https://github.com/pion/datachannel/issues/59 is fixed.
const (
	writeBufferHighWaterLen = 1 << 15
	writeBufferLowWaterLen  = writeBufferHighWaterLen / 2
)

var (
	interestedMsgLen = len(pp.Message{Type: pp.Interested}.MustMarshalBinary())
	requestMsgLen    = len(pp.Message{Type: pp.Request}.MustMarshalBinary())
	// This is the maximum request count that could fit in the write buffer if it's at or below the
	// low water mark when we run maybeUpdateActualRequestState.
	maxLocalToRemoteRequests = (writeBufferHighWaterLen - writeBufferLowWaterLen - interestedMsgLen) / requestMsgLen
)
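
// Rough arithmetic for the cap above (added for clarity; the message sizes
// assume the standard length-prefixed wire encoding of 5 bytes for interested
// and 17 bytes for request):
//
//	(writeBufferHighWaterLen - writeBufferLowWaterLen - interestedMsgLen) / requestMsgLen
//	= (32768 - 16384 - 5) / 17
//	≈ 963 requests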

// The actual value to use as the maximum outbound requests.
func (cn *Peer) nominalMaxRequests() maxRequests {
	return maxInt(1, minInt(cn.PeerMaxRequests, cn.peakRequests*2, maxLocalToRemoteRequests))
}
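
// Illustrative values (not from the original source): for a peer advertising,
// say, reqq = 250 while our recent peak was 40 outstanding requests, this
// evaluates to maxInt(1, minInt(250, 80, maxLocalToRemoteRequests)) = 80.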

func (cn *Peer) totalExpectingTime() (ret time.Duration) {
	ret = cn.cumulativeExpectedToReceiveChunks
	if !cn.lastStartedExpectingToReceiveChunks.IsZero() {
		ret += time.Since(cn.lastStartedExpectingToReceiveChunks)
	}
	return
}

func (cn *Peer) setInterested(interested bool) bool {
	if cn.requestState.Interested == interested {
		return true
	}
	cn.requestState.Interested = interested
	if interested {
		cn.lastBecameInterested = time.Now()
	} else if !cn.lastBecameInterested.IsZero() {
		cn.priorInterest += time.Since(cn.lastBecameInterested)
	}
	cn.updateExpectingChunks()
	// log.Printf("%p: setting interest: %v", cn, interested)
	return cn.writeInterested(interested)
}

// The function takes a message to be sent, and returns true if more messages
// are okay.
type messageWriter func(pp.Message) bool

// This function seems to be used only by Peer.request. It's all logic checks, so maybe we can no-op it
// when we want to go fast.
func (cn *Peer) shouldRequest(r RequestIndex) error {
	err := cn.t.checkValidReceiveChunk(cn.t.requestIndexToRequest(r))
	if err != nil {
		return err
	}
	pi := cn.t.pieceIndexOfRequestIndex(r)
	if cn.requestState.Cancelled.Contains(r) {
		return errors.New("request is cancelled and waiting acknowledgement")
	}
	if !cn.peerHasPiece(pi) {
		return errors.New("requesting piece peer doesn't have")
	}
	if !cn.t.peerIsActive(cn) {
		panic("requesting but not in active conns")
	}
	if cn.closed.IsSet() {
		panic("requesting when connection is closed")
	}
	if cn.t.hashingPiece(pi) {
		panic("piece is being hashed")
	}
	if cn.t.pieceQueuedForHash(pi) {
		panic("piece is queued for hash")
	}
	if cn.peerChoking && !cn.peerAllowedFast.Contains(pi) {
		// This could occur if we made a request with the fast extension, and then got choked and
		// haven't had the request rejected yet.
		if !cn.requestState.Requests.Contains(r) {
			panic("peer choking and piece not allowed fast")
		}
	}
	return nil
}

func (cn *Peer) mustRequest(r RequestIndex) bool {
	more, err := cn.request(r)
	if err != nil {
		panic(err)
	}
	return more
}

func (cn *Peer) request(r RequestIndex) (more bool, err error) {
	if err := cn.shouldRequest(r); err != nil {
		panic(err)
	}
	if cn.requestState.Requests.Contains(r) {
		return true, nil
	}
	if maxRequests(cn.requestState.Requests.GetCardinality()) >= cn.nominalMaxRequests() {
		return true, errors.New("too many outstanding requests")
	}
	cn.requestState.Requests.Add(r)
	if cn.validReceiveChunks == nil {
		cn.validReceiveChunks = make(map[RequestIndex]int)
	}
	cn.validReceiveChunks[r]++
	cn.t.requestState[r] = requestState{
		peer: cn,
		when: time.Now(),
	}
	cn.updateExpectingChunks()
	ppReq := cn.t.requestIndexToRequest(r)
	for _, f := range cn.callbacks.SentRequest {
		f(PeerRequestEvent{cn, ppReq})
	}
	return cn.peerImpl._request(ppReq), nil
}

func (me *Peer) cancel(r RequestIndex) {
	if !me.deleteRequest(r) {
		panic("request not existing should have been guarded")
	}
	if me._cancel(r) {
		if !me.requestState.Cancelled.CheckedAdd(r) {
			panic("request already cancelled")
		}
	}
	me.decPeakRequests()
	if me.isLowOnRequests() {
		me.updateRequests("Peer.cancel")
	}
}

// Sets a reason to update requests, and if there wasn't already one, handles it.
func (cn *Peer) updateRequests(reason string) {
	if cn.needRequestUpdate != "" {
		return
	}
	if reason != peerUpdateRequestsTimerReason && !cn.isLowOnRequests() {
		return
	}
	cn.needRequestUpdate = reason
	cn.handleUpdateRequests()
}

// Emits the indices in the Bitmaps bms in order, never repeating any index.
// skip is mutated during execution, and its initial values will never be
// emitted.
func iterBitmapsDistinct(skip *bitmap.Bitmap, bms ...bitmap.Bitmap) iter.Func {
	return func(cb iter.Callback) {
		for _, bm := range bms {
			if !iter.All(
				func(_i interface{}) bool {
					i := _i.(int)
					if skip.Contains(bitmap.BitIndex(i)) {
						return true
					}
					skip.Add(bitmap.BitIndex(i))
					return cb(i)
				},
				bm.Iter,
			) {
				return
			}
		}
	}
}
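
// Illustrative example (not from the original source): with skip = {5},
// bms[0] = {1, 3} and bms[1] = {3, 5}, the callback sees 1 and then 3; the
// repeated 3 is suppressed because it was added to skip, and 5 is never
// emitted because it was in skip to begin with.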

func (cn *Peer) peerPiecesChanged() {
	cn.t.maybeDropMutuallyCompletePeer(cn)
}

// After handshake, we know what Torrent and Client stats to include for a
// connection.
func (cn *Peer) postHandshakeStats(f func(*ConnStats)) {
	t := cn.t
	f(&t.stats)
	f(&t.cl.stats)
}

// All ConnStats that include this connection. Some objects are not known
// until the handshake is complete, after which it's expected to reconcile the
// differences.
func (cn *Peer) allStats(f func(*ConnStats)) {
	f(&cn._stats)
	if cn.reconciledHandshakeStats {
		cn.postHandshakeStats(f)
	}
}

func (cn *Peer) readBytes(n int64) {
	cn.allStats(add(n, func(cs *ConnStats) *Count { return &cs.BytesRead }))
}

// Returns whether the connection could be useful to us. We're seeding and
// they want data, we don't have metainfo and they can provide it, etc.
func (c *Peer) useful() bool {
	t := c.t
	if c.closed.IsSet() {
		return false
	}
	if !t.haveInfo() {
		return c.supportsExtension("ut_metadata")
	}
	if t.seeding() && c.peerInterested {
		return true
	}
	if c.peerHasWantedPieces() {
		return true
	}
	return false
}

func (c *Peer) lastHelpful() (ret time.Time) {
	ret = c.lastUsefulChunkReceived
	if c.t.seeding() && c.lastChunkSent.After(ret) {
		ret = c.lastChunkSent
	}
	return
}

// Returns whether any part of the chunk would lie outside a piece of the given length.
func chunkOverflowsPiece(cs ChunkSpec, pieceLength pp.Integer) bool {
	switch {
	default:
		return false
	case cs.Begin+cs.Length > pieceLength:
	// Check for integer overflow
	case cs.Begin > pp.IntegerMax-cs.Length:
	}
	return true
}
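
// Illustrative calls (not from the original source), with a 16 KiB piece:
//
//	chunkOverflowsPiece(ChunkSpec{Begin: 0, Length: 16384}, 16384)         // false: fits exactly
//	chunkOverflowsPiece(ChunkSpec{Begin: 16384, Length: 1}, 16384)         // true: ends past the piece
//	chunkOverflowsPiece(ChunkSpec{Begin: pp.IntegerMax, Length: 2}, 16384) // true: Begin+Length would wrap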

func runSafeExtraneous(f func()) {
	if true {
		go f()
	} else {
		f()
	}
}

// Returns true if it was valid to reject the request.
func (c *Peer) remoteRejectedRequest(r RequestIndex) bool {
	if c.deleteRequest(r) {
		c.decPeakRequests()
	} else if !c.requestState.Cancelled.CheckedRemove(r) {
		return false
	}
	if c.isLowOnRequests() {
		c.updateRequests("Peer.remoteRejectedRequest")
	}
	c.decExpectedChunkReceive(r)
	return true
}

func (c *Peer) decExpectedChunkReceive(r RequestIndex) {
	count := c.validReceiveChunks[r]
	if count == 1 {
		delete(c.validReceiveChunks, r)
	} else if count > 1 {
		c.validReceiveChunks[r] = count - 1
	} else {
		panic(r)
	}
}

func (c *Peer) doChunkReadStats(size int64) {
	c.allStats(func(cs *ConnStats) { cs.receivedChunk(size) })
}

// Handle a received chunk from a peer.
func (c *Peer) receiveChunk(msg *pp.Message) error {
	chunksReceived.Add("total", 1)

	ppReq := newRequestFromMessage(msg)
	t := c.t
	err := t.checkValidReceiveChunk(ppReq)
	if err != nil {
		err = log.WithLevel(log.Warning, err)
		return err
	}
	req := c.t.requestIndexFromRequest(ppReq)

	if c.bannableAddr.Ok {
		t.smartBanCache.RecordBlock(c.bannableAddr.Value, req, msg.Piece)
	}

	if c.peerChoking {
		chunksReceived.Add("while choked", 1)
	}

	if c.validReceiveChunks[req] <= 0 {
		chunksReceived.Add("unexpected", 1)
		return errors.New("received unexpected chunk")
	}
	c.decExpectedChunkReceive(req)

	if c.peerChoking && c.peerAllowedFast.Contains(pieceIndex(ppReq.Index)) {
		chunksReceived.Add("due to allowed fast", 1)
	}

	// The request needs to be deleted immediately to prevent cancels occurring asynchronously when
	// we have actually already received the piece, while we have the Client unlocked to write the
	// data out.
	intended := false
	{
		if c.requestState.Requests.Contains(req) {
			for _, f := range c.callbacks.ReceivedRequested {
				f(PeerMessageEvent{c, msg})
			}
		}
		// Request has been satisfied.
		if c.deleteRequest(req) || c.requestState.Cancelled.CheckedRemove(req) {
			intended = true
			if !c.peerChoking {
				c._chunksReceivedWhileExpecting++
			}
			if c.isLowOnRequests() {
				c.updateRequests("Peer.receiveChunk deleted request")
			}
		} else {
			chunksReceived.Add("unintended", 1)
		}
	}

	cl := t.cl

	// Do we actually want this chunk?
	if t.haveChunk(ppReq) {
		// panic(fmt.Sprintf("%+v", ppReq))
		chunksReceived.Add("redundant", 1)
		c.allStats(add(1, func(cs *ConnStats) *Count { return &cs.ChunksReadWasted }))
		return nil
	}

	piece := &t.pieces[ppReq.Index]

	c.allStats(add(1, func(cs *ConnStats) *Count { return &cs.ChunksReadUseful }))
	c.allStats(add(int64(len(msg.Piece)), func(cs *ConnStats) *Count { return &cs.BytesReadUsefulData }))
	if intended {
		c.piecesReceivedSinceLastRequestUpdate++
		c.allStats(add(int64(len(msg.Piece)), func(cs *ConnStats) *Count { return &cs.BytesReadUsefulIntendedData }))
	}
	for _, f := range c.t.cl.config.Callbacks.ReceivedUsefulData {
		f(ReceivedUsefulDataEvent{c, msg})
	}
	c.lastUsefulChunkReceived = time.Now()

	// Need to record that it hasn't been written yet, before we attempt to do
	// anything with it.
	piece.incrementPendingWrites()
	// Record that we have the chunk, so we aren't trying to download it while
	// waiting for it to be written to storage.
	piece.unpendChunkIndex(chunkIndexFromChunkSpec(ppReq.ChunkSpec, t.chunkSize))

	// Cancel pending requests for this chunk from *other* peers.
	if p := t.requestingPeer(req); p != nil {
		if p == c {
			panic("should not be pending request from conn that just received it")
		}
		p.cancel(req)
	}

	err = func() error {
		cl.unlock()
		defer cl.lock()
		concurrentChunkWrites.Add(1)
		defer concurrentChunkWrites.Add(-1)
		// Write the chunk out. Note that the upper bound on chunk writing concurrency will be the
		// number of connections. We write inline with receiving the chunk (with this lock dance),
		// because we want to handle errors synchronously and I haven't thought of a nice way to
		// defer any concurrency to the storage and have that notify the client of errors. TODO: Do
		// that instead.
		return t.writeChunk(int(msg.Index), int64(msg.Begin), msg.Piece)
	}()

	piece.decrementPendingWrites()

	if err != nil {
		c.logger.WithDefaultLevel(log.Error).Printf("writing received chunk %v: %v", req, err)
		t.pendRequest(req)
		// Necessary to pass TestReceiveChunkStorageFailureSeederFastExtensionDisabled. I think a
		// request update runs while we're writing the chunk that just failed. Then we never do a
		// fresh update after pending the failed request.
		c.updateRequests("Peer.receiveChunk error writing chunk")
		t.onWriteChunkErr(err)
		return nil
	}

	c.onDirtiedPiece(pieceIndex(ppReq.Index))

	// We need to ensure the piece is only queued once, so only the last chunk writer gets this job.
	if t.pieceAllDirty(pieceIndex(ppReq.Index)) && piece.pendingWrites == 0 {
		t.queuePieceCheck(pieceIndex(ppReq.Index))
		// We don't pend all chunks here anymore because we don't want code dependent on the dirty
		// chunk status (such as the haveChunk call above) to have to check all the various other
		// piece states like queued for hash, hashing etc. This does mean that we need to be sure
		// that chunk pieces are pended at an appropriate time later however.
	}

	cl.event.Broadcast()
	// We do this because we've written a chunk, and may change PieceState.Partial.
	t.publishPieceChange(pieceIndex(ppReq.Index))

	return nil
}

func (c *Peer) onDirtiedPiece(piece pieceIndex) {
	if c.peerTouchedPieces == nil {
		c.peerTouchedPieces = make(map[pieceIndex]struct{})
	}
	c.peerTouchedPieces[piece] = struct{}{}
	ds := &c.t.pieces[piece].dirtiers
	if *ds == nil {
		*ds = make(map[*Peer]struct{})
	}
	(*ds)[c] = struct{}{}
}

func (cn *Peer) netGoodPiecesDirtied() int64 {
	return cn._stats.PiecesDirtiedGood.Int64() - cn._stats.PiecesDirtiedBad.Int64()
}

func (c *Peer) peerHasWantedPieces() bool {
	if all, _ := c.peerHasAllPieces(); all {
		return !c.t.haveAllPieces() && !c.t._pendingPieces.IsEmpty()
	}
	if !c.t.haveInfo() {
		return !c.peerPieces().IsEmpty()
	}
	return c.peerPieces().Intersects(&c.t._pendingPieces)
}

// Returns true if an outstanding request is removed. Cancelled requests should be handled
// separately.
func (c *Peer) deleteRequest(r RequestIndex) bool {
	if !c.requestState.Requests.CheckedRemove(r) {
		return false
	}
	for _, f := range c.callbacks.DeletedRequest {
		f(PeerRequestEvent{c, c.t.requestIndexToRequest(r)})
	}
	c.updateExpectingChunks()
	if c.t.requestingPeer(r) != c {
		panic("only one peer should have a given request at a time")
	}
	delete(c.t.requestState, r)
	// c.t.iterPeers(func(p *Peer) {
	//      if p.isLowOnRequests() {
	//              p.updateRequests("Peer.deleteRequest")
	//      }
	// })
	return true
}

func (c *Peer) deleteAllRequests(reason string) {
	if c.requestState.Requests.IsEmpty() {
		return
	}
	c.requestState.Requests.IterateSnapshot(func(x RequestIndex) bool {
		if !c.deleteRequest(x) {
			panic("request should exist")
		}
		return true
	})
	c.assertNoRequests()
	c.t.iterPeers(func(p *Peer) {
		if p.isLowOnRequests() {
			p.updateRequests(reason)
		}
	})
	return
}

func (c *Peer) assertNoRequests() {
	if !c.requestState.Requests.IsEmpty() {
		panic(c.requestState.Requests.GetCardinality())
	}
}

func (c *Peer) cancelAllRequests() {
	c.requestState.Requests.IterateSnapshot(func(x RequestIndex) bool {
		c.cancel(x)
		return true
	})
	c.assertNoRequests()
	return
}

func (c *Peer) peerPriority() (peerPriority, error) {
	return bep40Priority(c.remoteIpPort(), c.localPublicAddr)
}

func (c *Peer) remoteIp() net.IP {
	host, _, _ := net.SplitHostPort(c.RemoteAddr.String())
	return net.ParseIP(host)
}

func (c *Peer) remoteIpPort() IpPort {
	ipa, _ := tryIpPortFromNetAddr(c.RemoteAddr)
	return IpPort{ipa.IP, uint16(ipa.Port)}
}

func (c *Peer) trust() connectionTrust {
	return connectionTrust{c.trusted, c.netGoodPiecesDirtied()}
}

type connectionTrust struct {
	Implicit            bool
	NetGoodPiecesDirted int64
}

func (l connectionTrust) Less(r connectionTrust) bool {
	return multiless.New().Bool(l.Implicit, r.Implicit).Int64(l.NetGoodPiecesDirted, r.NetGoodPiecesDirted).Less()
}
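
// Illustrative ordering (not from the original source): an untrusted peer
// sorts before an implicitly trusted one, and ties break on net good pieces
// dirtied, so connectionTrust{false, 10}.Less(connectionTrust{true, 0}) is
// true.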

// Returns a new Bitmap that includes bits for all pieces the peer could have based on their claims.
func (cn *Peer) newPeerPieces() *roaring.Bitmap {
	// TODO: Can we use copy on write?
	ret := cn.peerPieces().Clone()
	if all, _ := cn.peerHasAllPieces(); all {
		if cn.t.haveInfo() {
			ret.AddRange(0, bitmap.BitRange(cn.t.numPieces()))
		} else {
			ret.AddRange(0, bitmap.ToEnd)
		}
	}
	return ret
}

func (cn *Peer) stats() *ConnStats {
	return &cn._stats
}

func (p *Peer) TryAsPeerConn() (*PeerConn, bool) {
	pc, ok := p.peerImpl.(*PeerConn)
	return pc, ok
}

func (p *Peer) uncancelledRequests() uint64 {
	return p.requestState.Requests.GetCardinality()
}

type peerLocalPublicAddr = IpPort

func (p *Peer) isLowOnRequests() bool {
	return p.requestState.Requests.IsEmpty() && p.requestState.Cancelled.IsEmpty()
}

func (p *Peer) decPeakRequests() {
	// // This can occur when peak requests are altered by the update request timer to be lower than
	// // the actual number of outstanding requests. Let's let it go negative and see what happens. I
	// // wonder what happens if maxRequests is not signed.
	// if p.peakRequests < 1 {
	//      panic(p.peakRequests)
	// }
	p.peakRequests--
}