16 "github.com/anacrolix/log"
18 "github.com/anacrolix/missinggo"
19 "github.com/anacrolix/missinggo/bitmap"
20 "github.com/anacrolix/missinggo/iter"
21 "github.com/anacrolix/missinggo/prioritybitmap"
23 "github.com/anacrolix/torrent/bencode"
24 "github.com/anacrolix/torrent/mse"
25 pp "github.com/anacrolix/torrent/peer_protocol"
28 type peerSource string
31 peerSourceTracker = "T" // It's the default.
32 peerSourceIncoming = "I"
33 peerSourceDHTGetPeers = "Hg"
34 peerSourceDHTAnnouncePeer = "Ha"
// Maintains the state of a connection with a peer.
type connection struct {
	t *Torrent
	// The actual Conn, used for closing, and setting socket options.
	conn net.Conn
	// The Reader and Writer for this Conn, with hooks installed for stats,
	// limiting, deadlines etc.
	w io.Writer
	r io.Reader
	// True if the connection is operating over MSE obfuscation.
	headerEncrypted bool
	cryptoMethod    mse.CryptoMethod
	Discovery       peerSource
	closed          missinggo.Event
	stats           ConnStats

	lastMessageReceived     time.Time
	completedHandshake      time.Time
	lastUsefulChunkReceived time.Time
	lastChunkSent           time.Time

	// Stuff controlled by the local peer.
	Interested       bool
	Choked           bool
	requests         map[request]struct{}
	requestsLowWater int
	// Indexed by metadata piece, set to true if posted and pending a
	// response.
	metadataRequests []bool
	sentHaves        bitmap.Bitmap

	// Stuff controlled by the remote peer.
	PeerID             PeerID
	PeerInterested     bool
	PeerChoked         bool
	PeerRequests       map[request]struct{}
	PeerExtensionBytes peerExtensionBytes
	// The pieces the peer has claimed to have.
	peerPieces bitmap.Bitmap
	// The peer has everything. This can occur due to a special message, when
	// we may not even know the number of pieces in the torrent yet.
	peerSentHaveAll bool
	// The highest possible number of pieces the torrent could have based on
	// communication with the peer. Generally only useful until we have the
	// torrent info.
	peerMinPieces int
	// Pieces we've accepted chunks for from the peer.
	peerTouchedPieces map[int]struct{}

	PeerMaxRequests  int // Maximum pending requests the peer allows.
	PeerExtensionIDs map[string]byte
	PeerClientName   string

	pieceInclination  []int
	pieceRequestOrder prioritybitmap.PriorityBitmap

	writeBuffer *bytes.Buffer
	uploadTimer *time.Timer
	writerCond  sync.Cond
}

func (cn *connection) peerHasAllPieces() (all bool, known bool) {
	if cn.peerSentHaveAll {
		return true, true
	}
	if !cn.t.haveInfo() {
		return false, false
	}
	return bitmap.Flip(cn.peerPieces, 0, cn.t.numPieces()).IsEmpty(), true
}

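// Illustrative note (not in the original): the Flip above yields the pieces
// the peer is missing over [0, numPieces). For example, with numPieces == 4
// and peerPieces == {0, 1, 3}, the flipped bitmap is {2}, which is non-empty,
// so the peer does not have all pieces.
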
func (cn *connection) mu() sync.Locker {
	return &cn.t.cl.mu
}

func (cn *connection) remoteAddr() net.Addr {
	return cn.conn.RemoteAddr()
}

func (cn *connection) localAddr() net.Addr {
	return cn.conn.LocalAddr()
}

func (cn *connection) supportsExtension(ext string) bool {
	_, ok := cn.PeerExtensionIDs[ext]
	return ok
}

// The best guess at number of pieces in the torrent for this peer.
func (cn *connection) bestPeerNumPieces() int {
	if cn.t.haveInfo() {
		return cn.t.numPieces()
	}
	return cn.peerMinPieces
}

func (cn *connection) completedString() string {
	return fmt.Sprintf("%d/%d", cn.peerPieces.Len(), cn.bestPeerNumPieces())
}

// Correct the PeerPieces slice length. Returns an error if the existing
// state is invalid, such as having received a badly sized BITFIELD or an
// invalid HAVE message.
func (cn *connection) setNumPieces(num int) error {
	cn.peerPieces.RemoveRange(num, bitmap.ToEnd)
	cn.peerPiecesChanged()
	return nil
}

func eventAgeString(t time.Time) string {
	if t.IsZero() {
		return "never"
	}
	return fmt.Sprintf("%.2fs ago", time.Since(t).Seconds())
}

func (cn *connection) connectionFlags() (ret string) {
	c := func(b byte) {
		ret += string([]byte{b})
	}
	if cn.cryptoMethod == mse.CryptoMethodRC4 {
		c('E')
	} else if cn.headerEncrypted {
		c('e')
	}
	ret += string(cn.Discovery)
	return
}

// Inspired by https://trac.transmissionbt.com/wiki/PeerStatusText
func (cn *connection) statusFlags() (ret string) {
	c := func(b byte) {
		ret += string([]byte{b})
	}
	if cn.Interested {
		c('i')
	}
	ret += cn.connectionFlags()
	if cn.PeerInterested {
		c('i')
	}
	return
}

func (cn *connection) String() string {
	var buf bytes.Buffer
	cn.WriteStatus(&buf, nil)
	return buf.String()
}

func (cn *connection) WriteStatus(w io.Writer, t *Torrent) {
	// \t isn't preserved in <pre> blocks?
	fmt.Fprintf(w, "%-40s: %s-%s\n", cn.PeerID, cn.localAddr(), cn.remoteAddr())
	fmt.Fprintf(w, " last msg: %s, connected: %s, last helpful: %s\n",
		eventAgeString(cn.lastMessageReceived),
		eventAgeString(cn.completedHandshake),
		eventAgeString(cn.lastHelpful()))
	fmt.Fprintf(w,
		" %s completed, %d pieces touched, good chunks: %d/%d-%d reqq: %d-%d, flags: %s\n",
		cn.completedString(),
		len(cn.peerTouchedPieces),
		cn.stats.ChunksReadUseful,
		// TODO: Use ChunksRead? Verify that value is the same as this sum?
		cn.stats.ChunksReadUnwanted+cn.stats.ChunksReadUseful,
		cn.stats.ChunksWritten,
		cn.numLocalRequests(),
		len(cn.PeerRequests),
		cn.statusFlags())
	roi := cn.pieceRequestOrderIter()
	fmt.Fprintf(w, " next pieces: %v%s\n",
		iter.ToSlice(iter.Head(10, roi)),
		func() string {
			if cn.shouldRequestWithoutBias() {
				return " (fastest)"
			}
			return ""
		}())
}

func (cn *connection) Close() {
	if !cn.closed.Set() {
		return
	}
	cn.discardPieceInclination()
	cn.pieceRequestOrder.Clear()
}

func (cn *connection) PeerHasPiece(piece int) bool {
	return cn.peerSentHaveAll || cn.peerPieces.Contains(piece)
}

// Writes a message into the write buffer.
func (cn *connection) Post(msg pp.Message) {
	messageTypesPosted.Add(strconv.FormatInt(int64(msg.Type), 10), 1)
	// We don't need to track bytes here because a connection.w Writer wrapper
	// takes care of that (although there's some delay between us recording
	// the message, and the connection writer flushing it out).
	cn.writeBuffer.Write(msg.MustMarshalBinary())
	// Last I checked only Piece messages affect stats, and we don't post
	// those.
	cn.wroteMsg(&msg)
	cn.tickleWriter()
}

func (cn *connection) requestMetadataPiece(index int) {
	eID := cn.PeerExtensionIDs["ut_metadata"]
	if eID == 0 {
		return
	}
	if index < len(cn.metadataRequests) && cn.metadataRequests[index] {
		return
	}
	cn.Post(pp.Message{
		Type:       pp.Extended,
		ExtendedID: eID,
		ExtendedPayload: func() []byte {
			b, err := bencode.Marshal(map[string]int{
				"msg_type": pp.RequestMetadataExtensionMsgType,
				"piece":    index,
			})
			if err != nil {
				panic(err)
			}
			return b
		}(),
	})
	for index >= len(cn.metadataRequests) {
		cn.metadataRequests = append(cn.metadataRequests, false)
	}
	cn.metadataRequests[index] = true
}

func (cn *connection) requestedMetadataPiece(index int) bool {
	return index < len(cn.metadataRequests) && cn.metadataRequests[index]
}

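// Per BEP 9, the bencoded request payload built above looks like
// d8:msg_typei0e5:piecei<index>ee on the wire; msg_type 0 is a request, and
// the peer answers with msg_type 1 (data) or 2 (reject).
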
// The actual value to use as the maximum outbound requests.
func (cn *connection) nominalMaxRequests() (ret int) {
	ret = cn.PeerMaxRequests
	if ret > 64 {
		// Clamp the peer's advertised reqq to something sane.
		ret = 64
	}
	return
}

// Returns true if an unsatisfied request was canceled.
func (cn *connection) PeerCancel(r request) bool {
	if cn.PeerRequests == nil {
		return false
	}
	if _, ok := cn.PeerRequests[r]; !ok {
		return false
	}
	delete(cn.PeerRequests, r)
	return true
}

func (cn *connection) Choke(msg func(pp.Message) bool) bool {
	if cn.Choked {
		return true
	}
	cn.PeerRequests = nil
	cn.Choked = true
	return msg(pp.Message{
		Type: pp.Choke,
	})
}

func (cn *connection) Unchoke(msg func(pp.Message) bool) bool {
	if !cn.Choked {
		return true
	}
	cn.Choked = false
	return msg(pp.Message{
		Type: pp.Unchoke,
	})
}

func (cn *connection) SetInterested(interested bool, msg func(pp.Message) bool) bool {
	if cn.Interested == interested {
		return true
	}
	cn.Interested = interested
	// log.Printf("%p: setting interest: %v", cn, interested)
	return msg(pp.Message{
		Type: func() pp.MessageType {
			if interested {
				return pp.Interested
			}
			return pp.NotInterested
		}(),
	})
}

// The function takes a message to be sent, and returns true if more messages
// may follow.
type messageWriter func(pp.Message) bool

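// A minimal sketch (not part of the original file) of a messageWriter that
// honours the contract: marshal each message into a buffer and return true
// only while there's room for more. The 1<<16 cap mirrors the one used by
// the writer loop below; the buffer stands in for cn.writeBuffer.
var _ messageWriter = func(msg pp.Message) bool {
	var buf bytes.Buffer
	buf.Write(msg.MustMarshalBinary())
	return buf.Len() < 1<<16 // true: the caller may pass more messages
}
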
// Proxies the messageWriter's response.
func (cn *connection) request(r request, mw messageWriter) bool {
	if cn.requests == nil {
		cn.requests = make(map[request]struct{}, cn.nominalMaxRequests())
	}
	if _, ok := cn.requests[r]; ok {
		panic("chunk already requested")
	}
	if !cn.PeerHasPiece(r.Index.Int()) {
		panic("requesting piece peer doesn't have")
	}
	cn.requests[r] = struct{}{}
	if _, ok := cn.t.conns[cn]; !ok {
		panic("requesting but not in active conns")
	}
	cn.t.pendingRequests[r]++
	return mw(pp.Message{
		Type:   pp.Request,
		Index:  r.Index,
		Begin:  r.Begin,
		Length: r.Length,
	})
}

func (cn *connection) fillWriteBuffer(msg func(pp.Message) bool) {
	numFillBuffers.Add(1)
	cancel, new, i := cn.desiredRequestState()
	if !cn.SetInterested(i, msg) {
		return
	}
	if cancel && len(cn.requests) != 0 {
		fillBufferSentCancels.Add(1)
		for r := range cn.requests {
			cn.deleteRequest(r)
			// log.Printf("%p: cancelling request: %v", cn, r)
			if !msg(makeCancelMessage(r)) {
				return
			}
		}
	}
	if len(new) != 0 {
		fillBufferSentRequests.Add(1)
		for _, r := range new {
			if !cn.request(r, msg) {
				// If we didn't completely top up the requests, we shouldn't
				// mark the low water, since we'll want to top up the requests
				// as soon as we have more write buffer space.
				return
			}
		}
	}
	cn.requestsLowWater = len(cn.requests) / 2
}

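// Worked example (illustrative): with nominalMaxRequests() == 64 and a full
// top-up, requestsLowWater becomes 32, so desiredRequestState won't produce
// new requests until outstanding requests drain to 32 or fewer. Requests are
// therefore issued in batches rather than topped up one at a time.
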
// Routine that writes to the peer. Some of what to write is buffered by
// activity elsewhere in the Client, and some is determined locally when the
// connection is writable.
func (cn *connection) writer(keepAliveTimeout time.Duration) {
	var (
		lastWrite      time.Time = time.Now()
		keepAliveTimer *time.Timer
	)
	keepAliveTimer = time.AfterFunc(keepAliveTimeout, func() {
		cn.mu().Lock()
		defer cn.mu().Unlock()
		if time.Since(lastWrite) >= keepAliveTimeout {
			cn.tickleWriter()
		}
		keepAliveTimer.Reset(keepAliveTimeout)
	})
	cn.mu().Lock()
	defer cn.mu().Unlock()
	defer keepAliveTimer.Stop()
	frontBuf := new(bytes.Buffer)
	for {
		if cn.closed.IsSet() {
			return
		}
		if cn.writeBuffer.Len() == 0 {
			cn.fillWriteBuffer(func(msg pp.Message) bool {
				cn.writeBuffer.Write(msg.MustMarshalBinary())
				return cn.writeBuffer.Len() < 1<<16
			})
		}
		if cn.writeBuffer.Len() == 0 && time.Since(lastWrite) >= keepAliveTimeout {
			cn.writeBuffer.Write(pp.Message{Keepalive: true}.MustMarshalBinary())
			postedKeepalives.Add(1)
		}
		if cn.writeBuffer.Len() == 0 {
			// TODO: Minimize wakeups.
			cn.writerCond.Wait()
			continue
		}
		// Swap buffers, then write the front buffer without holding the lock.
		frontBuf, cn.writeBuffer = cn.writeBuffer, frontBuf
		cn.mu().Unlock()
		n, err := cn.w.Write(frontBuf.Bytes())
		cn.mu().Lock()
		if n != 0 {
			lastWrite = time.Now()
			keepAliveTimer.Reset(keepAliveTimeout)
		}
		if err != nil {
			return
		}
		if n != frontBuf.Len() {
			// A short write without an error violates the io.Writer
			// contract; give up.
			return
		}
		frontBuf.Reset()
	}
}

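// Design note (illustrative): the loop above double-buffers writes. Under the
// client lock it swaps cn.writeBuffer with frontBuf, then releases the lock
// while the potentially slow cn.w.Write runs, so message producers can keep
// appending to the (now empty) cn.writeBuffer concurrently.
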
func (cn *connection) Have(piece int) {
	if cn.sentHaves.Get(piece) {
		return
	}
	cn.Post(pp.Message{
		Type:  pp.Have,
		Index: pp.Integer(piece),
	})
	cn.sentHaves.Add(piece)
}

func (cn *connection) PostBitfield() {
	if cn.sentHaves.Len() != 0 {
		panic("bitfield must be first have-related message sent")
	}
	if !cn.t.haveAnyPieces() {
		return
	}
	cn.Post(pp.Message{
		Type:     pp.Bitfield,
		Bitfield: cn.t.bitfield(),
	})
	cn.sentHaves = cn.t.completedPieces.Copy()
}

// Determines interest and requests to send to a connected peer.
func nextRequestState(
	networkingEnabled bool,
	currentRequests map[request]struct{},
	peerChoking bool,
	iterPendingRequests func(f func(request) bool),
	requestsLowWater int,
	requestsHighWater int,
) (
	cancelExisting bool, // Cancel all our pending requests
	newRequests []request, // Chunks to request that we currently aren't
	interested bool, // Whether we should indicate interest, even if we don't request anything
) {
	if !networkingEnabled {
		return true, nil, false
	}
	if len(currentRequests) > requestsLowWater {
		return false, nil, true
	}
	iterPendingRequests(func(r request) bool {
		interested = true
		if peerChoking {
			return false
		}
		if _, ok := currentRequests[r]; !ok {
			if newRequests == nil {
				newRequests = make([]request, 0, requestsHighWater-len(currentRequests))
			}
			newRequests = append(newRequests, r)
		}
		return len(currentRequests)+len(newRequests) < requestsHighWater
	})
	return
}

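// Illustrative only: how the pure nextRequestState behaves at the boundaries.
// With networking disabled everything is cancelled; above the low water mark
// it neither cancels nor requests, but stays interested:
//
//	cancel, _, _ := nextRequestState(false, nil, false, nil, 0, 0)    // cancel == true
//	_, new, i := nextRequestState(true, reqs, false, iterNone, 1, 64) // len(reqs) > 1: new == nil, i == true
//
// reqs and iterNone are hypothetical stand-ins for a request map and an
// empty iterator.
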
func (cn *connection) updateRequests() {
	// log.Print("update requests")
	cn.tickleWriter()
}

// Emits the indices in the Bitmaps bms in order, never repeating any index.
// skip is mutated during execution, and its initial values will never be
// emitted.
func iterBitmapsDistinct(skip bitmap.Bitmap, bms ...bitmap.Bitmap) iter.Func {
	return func(cb iter.Callback) {
		for _, bm := range bms {
			if !iter.All(func(i interface{}) bool {
				skip.Add(i.(int))
				return cb(i)
			}, bitmap.Sub(bm, skip).Iter) {
				return
			}
		}
	}
}

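// Illustrative sketch of iterBitmapsDistinct: because every emitted index is
// added to skip, an index present in several bitmaps is yielded only once.
// With a = {0, 1}, b = {1, 2} and an empty skip, the emitted order is 0, 1, 2:
//
//	iterBitmapsDistinct(skip, a, b)(func(i interface{}) bool {
//		fmt.Println(i) // 0, then 1, then 2
//		return true
//	})
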
func (cn *connection) unbiasedPieceRequestOrder() iter.Func {
	now, readahead := cn.t.readerPiecePriorities()
	var skip bitmap.Bitmap
	if !cn.peerSentHaveAll {
		// Pieces to skip include pieces the peer doesn't have.
		skip = bitmap.Flip(cn.peerPieces, 0, cn.t.numPieces())
	}
	// And pieces that we already have.
	skip.Union(cn.t.completedPieces)
	// Return an iterator over the different priority classes, minus the skip
	// pieces.
	return iter.Chain(
		iterBitmapsDistinct(skip, now, readahead),
		func(cb iter.Callback) {
			cn.t.pendingPieces.IterTyped(func(piece int) bool {
				if skip.Contains(piece) {
					return true
				}
				return cb(piece)
			})
		},
	)
}

// The connection should download highest priority pieces first, without any
// inclination toward avoiding wastage. Generally we might do this if there's
// a single connection, or this is the fastest connection, and we have active
// readers that signal an ordering preference. It's conceivable that the best
// connection should do this, since it's least likely to waste our time if
// assigned to the highest priority pieces, and assigning more than one this
// role would cause significant wasted bandwidth.
func (cn *connection) shouldRequestWithoutBias() bool {
	if cn.t.requestStrategy != 2 {
		return false
	}
	if len(cn.t.readers) == 0 {
		return false
	}
	if len(cn.t.conns) == 1 {
		return true
	}
	if cn == cn.t.fastestConn {
		return true
	}
	return false
}

func (cn *connection) pieceRequestOrderIter() iter.Func {
	if cn.shouldRequestWithoutBias() {
		return cn.unbiasedPieceRequestOrder()
	}
	return cn.pieceRequestOrder.Iter
}

func (cn *connection) iterPendingRequests(f func(request) bool) {
	cn.pieceRequestOrderIter()(func(_piece interface{}) bool {
		piece := _piece.(int)
		return iterUndirtiedChunks(piece, cn.t, func(cs chunkSpec) bool {
			r := request{pp.Integer(piece), cs}
			// log.Println(r, cn.t.pendingRequests[r], cn.requests)
			// if _, ok := cn.requests[r]; !ok && cn.t.pendingRequests[r] != 0 {
			// 	return true
			// }
			return f(r)
		})
	})
}

func (cn *connection) desiredRequestState() (bool, []request, bool) {
	return nextRequestState(
		cn.t.networkingEnabled,
		cn.requests,
		cn.PeerChoked,
		cn.iterPendingRequests,
		cn.requestsLowWater,
		cn.nominalMaxRequests(),
	)
}

func iterUndirtiedChunks(piece int, t *Torrent, f func(chunkSpec) bool) bool {
	chunkIndices := t.pieces[piece].undirtiedChunkIndices().ToSortedSlice()
	// TODO: Use "math/rand".Shuffle once Go 1.10 is the minimum supported
	// version.
	return iter.ForPerm(len(chunkIndices), func(i int) bool {
		return f(t.chunkIndexSpec(chunkIndices[i], piece))
	})
}

// Check that callers update requests where necessary.
func (cn *connection) stopRequestingPiece(piece int) bool {
	return cn.pieceRequestOrder.Remove(piece)
}

// This is distinct from Torrent piece priority, which is the user's
// preference. Connection piece priority is specific to a connection and is
// used to pseudorandomly avoid connections always requesting the same pieces
// and thus wasting effort.
func (cn *connection) updatePiecePriority(piece int) bool {
	tpp := cn.t.piecePriority(piece)
	if !cn.PeerHasPiece(piece) {
		tpp = PiecePriorityNone
	}
	if tpp == PiecePriorityNone {
		return cn.stopRequestingPiece(piece)
	}
	prio := cn.getPieceInclination()[piece]
	switch cn.t.requestStrategy {
	case 1:
		switch tpp {
		case PiecePriorityNormal:
		case PiecePriorityReadahead:
			prio -= cn.t.numPieces()
		case PiecePriorityNext, PiecePriorityNow:
			prio -= 2 * cn.t.numPieces()
		}
	}
	return cn.pieceRequestOrder.Set(piece, prio) || cn.shouldRequestWithoutBias()
}

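// Worked example (illustrative): with numPieces == 100 and a random
// inclination prio == 37 for this piece, a readahead piece sorts at
// 37-100 == -63 and a "next"/"now" piece at 37-200 == -163. Each urgency
// class therefore sorts strictly ahead of the previous one in the
// PriorityBitmap, whatever the per-connection inclination value.
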
func (cn *connection) getPieceInclination() []int {
	if cn.pieceInclination == nil {
		cn.pieceInclination = cn.t.getConnPieceInclination()
	}
	return cn.pieceInclination
}

func (cn *connection) discardPieceInclination() {
	if cn.pieceInclination == nil {
		return
	}
	cn.t.putPieceInclination(cn.pieceInclination)
	cn.pieceInclination = nil
}

func (cn *connection) peerPiecesChanged() {
	if cn.t.haveInfo() {
		prioritiesChanged := false
		for i := range iter.N(cn.t.numPieces()) {
			if cn.updatePiecePriority(i) {
				prioritiesChanged = true
			}
		}
		if prioritiesChanged {
			cn.updateRequests()
		}
	}
}

func (cn *connection) raisePeerMinPieces(newMin int) {
	if newMin > cn.peerMinPieces {
		cn.peerMinPieces = newMin
	}
}

func (cn *connection) peerSentHave(piece int) error {
	if cn.t.haveInfo() && piece >= cn.t.numPieces() || piece < 0 {
		return errors.New("invalid piece")
	}
	if cn.PeerHasPiece(piece) {
		return nil
	}
	cn.raisePeerMinPieces(piece + 1)
	cn.peerPieces.Set(piece, true)
	if cn.updatePiecePriority(piece) {
		cn.updateRequests()
	}
	return nil
}

func (cn *connection) peerSentBitfield(bf []bool) error {
	cn.peerSentHaveAll = false
	if len(bf)%8 != 0 {
		panic("expected bitfield length divisible by 8")
	}
	// We know that the last byte means that at most the last 7 bits are
	// padding.
	cn.raisePeerMinPieces(len(bf) - 7)
	if cn.t.haveInfo() && len(bf) > cn.t.numPieces() {
		// Ignore known excess pieces.
		bf = bf[:cn.t.numPieces()]
	}
	for i, have := range bf {
		if have {
			cn.raisePeerMinPieces(i + 1)
		}
		cn.peerPieces.Set(i, have)
	}
	cn.peerPiecesChanged()
	return nil
}

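// Worked example (illustrative): a 16-bit bitfield always raises the minimum
// piece count to 16-7 == 9, since only the final byte can contain padding
// bits. A set bit at index 12 then raises it further to 13 via
// raisePeerMinPieces(i + 1).
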
func (cn *connection) onPeerSentHaveAll() error {
	cn.peerSentHaveAll = true
	cn.peerPieces.Clear()
	cn.peerPiecesChanged()
	return nil
}

func (cn *connection) peerSentHaveNone() error {
	cn.peerPieces.Clear()
	cn.peerSentHaveAll = false
	cn.peerPiecesChanged()
	return nil
}

func (c *connection) requestPendingMetadata() {
	if c.t.haveInfo() {
		return
	}
	if c.PeerExtensionIDs["ut_metadata"] == 0 {
		// Peer doesn't support this.
		return
	}
	// Request metadata pieces that we don't have in a random order.
	var pending []int
	for index := 0; index < c.t.metadataPieceCount(); index++ {
		if !c.t.haveMetadataPiece(index) && !c.requestedMetadataPiece(index) {
			pending = append(pending, index)
		}
	}
	for _, i := range rand.Perm(len(pending)) {
		c.requestMetadataPiece(pending[i])
	}
}

func (cn *connection) wroteMsg(msg *pp.Message) {
	messageTypesSent.Add(strconv.FormatInt(int64(msg.Type), 10), 1)
	cn.stats.wroteMsg(msg)
	cn.t.stats.wroteMsg(msg)
}

func (cn *connection) readMsg(msg *pp.Message) {
	cn.stats.readMsg(msg)
	cn.t.stats.readMsg(msg)
}

func (cn *connection) wroteBytes(n int64) {
	cn.stats.wroteBytes(n)
	if cn.t != nil {
		cn.t.stats.wroteBytes(n)
	}
}

func (cn *connection) readBytes(n int64) {
	cn.stats.readBytes(n)
	if cn.t != nil {
		cn.t.stats.readBytes(n)
	}
}

// Returns whether the connection could be useful to us. We're seeding and
// they want data, we don't have metainfo and they can provide it, etc.
func (c *connection) useful() bool {
	t := c.t
	if c.closed.IsSet() {
		return false
	}
	if !t.haveInfo() {
		return c.supportsExtension("ut_metadata")
	}
	if t.seeding() && c.PeerInterested {
		return true
	}
	if c.peerHasWantedPieces() {
		return true
	}
	return false
}

func (c *connection) lastHelpful() (ret time.Time) {
	ret = c.lastUsefulChunkReceived
	if c.t.seeding() && c.lastChunkSent.After(ret) {
		ret = c.lastChunkSent
	}
	return
}

func (c *connection) fastEnabled() bool {
	return c.PeerExtensionBytes.SupportsFast() && c.t.cl.extensionBytes.SupportsFast()
}

// Returns true if we were able to reject the request.
func (c *connection) reject(r request) bool {
	if !c.fastEnabled() {
		return false
	}
	c.Post(r.ToMsg(pp.Reject))
	delete(c.PeerRequests, r)
	return true
}

func (c *connection) onReadRequest(r request) error {
	requestedChunkLengths.Add(strconv.FormatUint(r.Length.Uint64(), 10), 1)
	if r.Begin+r.Length > c.t.pieceLength(int(r.Index)) {
		return errors.New("bad request")
	}
	if _, ok := c.PeerRequests[r]; ok {
		// The peer is already due this chunk.
		return nil
	}
	if len(c.PeerRequests) >= maxRequests {
		return errors.New("too many outstanding requests")
	}
	if !c.t.havePiece(r.Index.Int()) {
		// This isn't necessarily them screwing up. We can drop pieces
		// from our storage, and can't communicate this to peers
		// except by reconnecting.
		requestsReceivedForMissingPieces.Add(1)
		return fmt.Errorf("peer requested piece we don't have: %v", r.Index.Int())
	}
	if c.PeerRequests == nil {
		c.PeerRequests = make(map[request]struct{}, maxRequests)
	}
	c.PeerRequests[r] = struct{}{}
	c.tickleWriter()
	return nil
}

// Processes incoming bittorrent messages. The client lock is held upon entry
// and exit. Returning will end the connection.
func (c *connection) mainReadLoop() error {
	t := c.t
	cl := t.cl
	decoder := pp.Decoder{
		R:         bufio.NewReaderSize(c.r, 1<<17),
		MaxLength: 256 * 1024,
		Pool:      t.chunkPool,
	}
	for {
		var msg pp.Message
		var err error
		func() {
			cl.mu.Unlock()
			defer cl.mu.Lock()
			err = decoder.Decode(&msg)
		}()
		if cl.closed.IsSet() || c.closed.IsSet() || err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		c.readMsg(&msg)
		c.lastMessageReceived = time.Now()
		if msg.Keepalive {
			receivedKeepalives.Add(1)
			continue
		}
		messageTypesReceived.Add(strconv.FormatInt(int64(msg.Type), 10), 1)
		switch msg.Type {
		case pp.Choke:
			c.PeerChoked = true
			c.deleteAllRequests()
			// We can then reset our interest.
			c.updateRequests()
		case pp.Reject:
			if c.deleteRequest(newRequestFromMessage(&msg)) {
				c.updateRequests()
			}
		case pp.Unchoke:
			c.PeerChoked = false
		case pp.Interested:
			c.PeerInterested = true
		case pp.NotInterested:
			c.PeerInterested = false
		case pp.Have:
			err = c.peerSentHave(int(msg.Index))
		case pp.Request:
			r := newRequestFromMessage(&msg)
			err = c.onReadRequest(r)
		case pp.Cancel:
			req := newRequestFromMessage(&msg)
			if !c.PeerCancel(req) {
				unexpectedCancels.Add(1)
			}
		case pp.Bitfield:
			err = c.peerSentBitfield(msg.Bitfield)
		case pp.HaveAll:
			err = c.onPeerSentHaveAll()
		case pp.HaveNone:
			err = c.peerSentHaveNone()
		case pp.Piece:
			c.receiveChunk(&msg)
			if len(msg.Piece) == int(t.chunkSize) {
				t.chunkPool.Put(&msg.Piece)
			}
		case pp.Extended:
			err = c.onReadExtendedMsg(msg.ExtendedID, msg.ExtendedPayload)
		case pp.Port:
			pingAddr, err := net.ResolveUDPAddr("", c.remoteAddr().String())
			if err != nil {
				panic(err)
			}
			if msg.Port != 0 {
				pingAddr.Port = int(msg.Port)
			}
			go cl.dHT.Ping(pingAddr, nil)
		default:
			err = fmt.Errorf("received unknown message type: %#v", msg.Type)
		}
		if err != nil {
			return err
		}
	}
}

func (c *connection) onReadExtendedMsg(id byte, payload []byte) (err error) {
	t := c.t
	cl := t.cl
	// TODO: Should we still do this?
	{
		// These clients use their own extension IDs for outgoing message
		// types, which is incorrect.
		if bytes.HasPrefix(c.PeerID[:], []byte("-SD0100-")) || strings.HasPrefix(string(c.PeerID[:]), "-XL0012-") {
			return nil
		}
	}
	switch id {
	case pp.HandshakeExtendedID:
		// TODO: Create a bencode struct for this.
		var d map[string]interface{}
		err := bencode.Unmarshal(payload, &d)
		if err != nil {
			return fmt.Errorf("error decoding extended message payload: %s", err)
		}
		// log.Printf("got handshake from %q: %#v", c.Socket.RemoteAddr().String(), d)
		if reqq, ok := d["reqq"]; ok {
			if i, ok := reqq.(int64); ok {
				c.PeerMaxRequests = int(i)
			}
		}
		if v, ok := d["v"]; ok {
			c.PeerClientName = v.(string)
		}
		if m, ok := d["m"]; ok {
			mTyped, ok := m.(map[string]interface{})
			if !ok {
				return errors.New("handshake m value is not dict")
			}
			if c.PeerExtensionIDs == nil {
				c.PeerExtensionIDs = make(map[string]byte, len(mTyped))
			}
			for name, v := range mTyped {
				id, ok := v.(int64)
				if !ok {
					log.Printf("bad handshake m item extension ID type: %T", v)
					continue
				}
				if id == 0 {
					delete(c.PeerExtensionIDs, name)
				} else {
					if c.PeerExtensionIDs[name] == 0 {
						supportedExtensionMessages.Add(name, 1)
					}
					c.PeerExtensionIDs[name] = byte(id)
				}
			}
		}
		metadata_sizeUntyped, ok := d["metadata_size"]
		if ok {
			metadata_size, ok := metadata_sizeUntyped.(int64)
			if !ok {
				log.Printf("bad metadata_size type: %T", metadata_sizeUntyped)
			} else {
				err = t.setMetadataSize(metadata_size)
				if err != nil {
					return fmt.Errorf("error setting metadata size to %d", metadata_size)
				}
			}
		}
		if _, ok := c.PeerExtensionIDs["ut_metadata"]; ok {
			c.requestPendingMetadata()
		}
		return nil
	case metadataExtendedId:
		err := cl.gotMetadataExtensionMsg(payload, t, c)
		if err != nil {
			return fmt.Errorf("error handling metadata extension message: %s", err)
		}
		return nil
	case pexExtendedId:
		if cl.config.DisablePEX {
			return nil
		}
		var pexMsg peerExchangeMessage
		err := bencode.Unmarshal(payload, &pexMsg)
		if err != nil {
			return fmt.Errorf("error unmarshalling PEX message: %s", err)
		}
		t.addPeers(func() (ret []Peer) {
			for i, cp := range pexMsg.Added {
				p := Peer{
					IP:     make([]byte, 4),
					Port:   cp.Port,
					Source: peerSourcePEX,
				}
				if i < len(pexMsg.AddedFlags) && pexMsg.AddedFlags[i]&0x01 != 0 {
					p.SupportsEncryption = true
				}
				missinggo.CopyExact(p.IP, cp.IP[:])
				ret = append(ret, p)
			}
			return
		}())
		return nil
	default:
		return fmt.Errorf("unexpected extended message ID: %v", id)
	}
}

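// For reference (values made up, structure per BEP 10): a typical extended
// handshake payload bencodes a dict like
//
//	map[string]interface{}{
//		"m":             map[string]interface{}{"ut_metadata": int64(3), "ut_pex": int64(1)},
//		"reqq":          int64(250),
//		"v":             "ExampleClient 1.0",
//		"metadata_size": int64(31235),
//	}
//
// which is what the pp.HandshakeExtendedID case above picks apart.
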
// Set both the Reader and Writer for the connection from a single ReadWriter.
func (cn *connection) setRW(rw io.ReadWriter) {
	cn.r = rw
	cn.w = rw
}

// Returns the Reader and Writer as a combined ReadWriter.
func (cn *connection) rw() io.ReadWriter {
	return struct {
		io.Reader
		io.Writer
	}{cn.r, cn.w}
}

// Handle a received chunk from a peer.
func (c *connection) receiveChunk(msg *pp.Message) {
	t := c.t
	cl := t.cl
	chunksReceived.Add(1)

	req := newRequestFromMessage(msg)

	// Request has been satisfied.
	if c.deleteRequest(req) {
		c.updateRequests()
	} else {
		unexpectedChunksReceived.Add(1)
	}

	// Do we actually want this chunk?
	if !t.wantPiece(req) {
		unwantedChunksReceived.Add(1)
		c.stats.ChunksReadUnwanted++
		c.t.stats.ChunksReadUnwanted++
		return
	}

	index := int(req.Index)
	piece := &t.pieces[index]

	c.stats.ChunksReadUseful++
	c.t.stats.ChunksReadUseful++
	c.stats.BytesReadUsefulData += int64(len(msg.Piece))
	c.t.stats.BytesReadUsefulData += int64(len(msg.Piece))
	c.lastUsefulChunkReceived = time.Now()
	// if t.fastestConn != c {
	// 	log.Printf("setting fastest connection %p", c)
	// }
	t.fastestConn = c

	// Need to record that it hasn't been written yet, before we attempt to do
	// anything with it.
	piece.incrementPendingWrites()
	// Record that we have the chunk, so we aren't trying to download it while
	// waiting for it to be written to storage.
	piece.unpendChunkIndex(chunkIndex(req.chunkSpec, t.chunkSize))

	// Cancel pending requests for this chunk.
	for c := range t.conns {
		c.postCancel(req)
	}

	err := func() error {
		cl.mu.Unlock()
		defer cl.mu.Lock()
		// Write the chunk out. Note that the upper bound on chunk writing
		// concurrency will be the number of connections. We write inline with
		// receiving the chunk (with this lock dance), because we want to
		// handle errors synchronously and I haven't thought of a nice way to
		// defer any concurrency to the storage and have that notify the
		// client of errors. TODO: Do that instead.
		return t.writeChunk(int(msg.Index), int64(msg.Begin), msg.Piece)
	}()

	piece.decrementPendingWrites()

	if err != nil {
		log.Printf("%s (%s): error writing chunk %v: %s", t, t.infoHash, req, err)
		t.updatePieceCompletion(int(msg.Index))
		return
	}

	// It's important that the piece is potentially queued before we check if
	// the piece is still wanted, because if it is queued, it won't be wanted.
	if t.pieceAllDirty(index) {
		t.queuePieceCheck(int(req.Index))
		t.pendAllChunkSpecs(index)
	}

	c.onDirtiedPiece(index)

	cl.event.Broadcast()
	t.publishPieceChange(int(req.Index))
}

func (c *connection) onDirtiedPiece(piece int) {
	if c.peerTouchedPieces == nil {
		c.peerTouchedPieces = make(map[int]struct{})
	}
	c.peerTouchedPieces[piece] = struct{}{}
	ds := &c.t.pieces[piece].dirtiers
	if *ds == nil {
		*ds = make(map[*connection]struct{})
	}
	(*ds)[c] = struct{}{}
}

func (c *connection) uploadAllowed() bool {
	if c.t.cl.config.NoUpload {
		return false
	}
	if c.t.seeding() {
		return true
	}
	if !c.peerHasWantedPieces() {
		return false
	}
	// Don't upload more than 100 KiB more than we download.
	if c.stats.BytesWrittenData >= c.stats.BytesReadData+100<<10 {
		return false
	}
	return true
}

func (c *connection) setRetryUploadTimer(delay time.Duration) {
	if c.uploadTimer == nil {
		c.uploadTimer = time.AfterFunc(delay, c.writerCond.Broadcast)
	} else {
		c.uploadTimer.Reset(delay)
	}
}

// Also handles choking and unchoking of the remote peer.
func (c *connection) upload(msg func(pp.Message) bool) bool {
	// Breaking or completing this loop means we don't want to upload to the
	// peer anymore, and we choke them.
another:
	for c.uploadAllowed() {
		// We want to upload to the peer.
		if !c.Unchoke(msg) {
			return false
		}
		for r := range c.PeerRequests {
			res := c.t.cl.uploadLimit.ReserveN(time.Now(), int(r.Length))
			if !res.OK() {
				panic(fmt.Sprintf("upload rate limiter burst size < %d", r.Length))
			}
			delay := res.Delay()
			if delay > 0 {
				res.Cancel()
				c.setRetryUploadTimer(delay)
				// Hard to say what to return here.
				return true
			}
			more, err := c.sendChunk(r, msg)
			if err != nil {
				i := int(r.Index)
				if c.t.pieceComplete(i) {
					c.t.updatePieceCompletion(i)
					if !c.t.pieceComplete(i) {
						// We had the piece, but not anymore.
						break another
					}
				}
				log.Str("error sending chunk to peer").AddValues(c, r, err).Log(c.t.logger)
				// If we failed to send a chunk, choke the peer to ensure they
				// flush all their requests. We've probably dropped a piece,
				// but there's no way to communicate this to the peer. If they
				// ask for it again, we'll kick them to allow us to send them
				// an updated bitfield.
				break another
			}
			delete(c.PeerRequests, r)
			if !more {
				return false
			}
			goto another
		}
		return true
	}
	return c.Choke(msg)
}

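// Illustrative sketch of the reservation pattern used in upload, assuming
// uploadLimit is a golang.org/x/time/rate Limiter: reserve the chunk's worth
// of tokens up front, and if they aren't available yet, cancel the
// reservation and retry after the suggested delay instead of blocking:
//
//	res := limiter.ReserveN(time.Now(), n)
//	if !res.OK() {
//		// n exceeds the limiter's burst; this reservation can never succeed.
//	}
//	if d := res.Delay(); d > 0 {
//		res.Cancel() // return the tokens and retry after d
//	}
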
func (cn *connection) Drop() {
	cn.t.dropConnection(cn)
}

func (cn *connection) netGoodPiecesDirtied() int64 {
	return cn.stats.PiecesDirtiedGood - cn.stats.PiecesDirtiedBad
}

func (c *connection) peerHasWantedPieces() bool {
	return !c.pieceRequestOrder.IsEmpty()
}

func (c *connection) numLocalRequests() int {
	return len(c.requests)
}

func (c *connection) deleteRequest(r request) bool {
	if _, ok := c.requests[r]; !ok {
		return false
	}
	delete(c.requests, r)
	c.t.pendingRequests[r]--
	return true
}

func (c *connection) deleteAllRequests() {
	for r := range c.requests {
		c.deleteRequest(r)
	}
	// for c := range c.t.conns {
	// 	c.updateRequests()
	// }
}

func (c *connection) tickleWriter() {
	c.writerCond.Broadcast()
}

func (c *connection) postCancel(r request) bool {
	if !c.deleteRequest(r) {
		return false
	}
	c.Post(makeCancelMessage(r))
	return true
}

func (c *connection) sendChunk(r request, msg func(pp.Message) bool) (more bool, err error) {
	// Count the chunk being sent, even if it isn't.
	b := make([]byte, r.Length)
	p := c.t.info.Piece(int(r.Index))
	n, err := c.t.readAt(b, p.Offset()+int64(r.Begin))
	if n != len(b) {
		if err == nil {
			panic("expected error")
		}
		return
	} else if err == io.EOF {
		err = nil
	}
	more = msg(pp.Message{
		Type:  pp.Piece,
		Index: r.Index,
		Begin: r.Begin,
		Piece: b,
	})
	uploadChunksPosted.Add(1)
	c.lastChunkSent = time.Now()
	return
}

func (c *connection) setTorrent(t *Torrent) {
	if c.t != nil {
		panic("connection already associated with a torrent")
	}
	c.t = t
	t.conns[c] = struct{}{}
}