17 "github.com/anacrolix/missinggo"
18 "github.com/anacrolix/missinggo/bitmap"
19 "github.com/anacrolix/missinggo/iter"
20 "github.com/anacrolix/missinggo/prioritybitmap"
22 "github.com/anacrolix/torrent/bencode"
23 "github.com/anacrolix/torrent/mse"
24 pp "github.com/anacrolix/torrent/peer_protocol"
27 type peerSource string
const (
	peerSourceTracker         = "T" // It's the default.
	peerSourceIncoming        = "I"
	peerSourceDHTGetPeers     = "Hg"
	peerSourceDHTAnnouncePeer = "Ha"
	peerSourcePEX             = "X"
)
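// These tags appear in the connection flag strings produced by
// connectionFlags and WriteStatus, via the Discovery field.
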
// Maintains the state of a connection with a peer.
type connection struct {
	t *Torrent
	// The actual Conn, used for closing, and setting socket options.
	conn net.Conn
	// The Reader and Writer for this Conn, with hooks installed for stats,
	// limiting, deadlines etc.
	w io.Writer
	r io.Reader
	// True if the connection is operating over MSE obfuscation.
	headerEncrypted bool
	cryptoMethod    mse.CryptoMethod
	Discovery       peerSource
	closed          missinggo.Event

	stats ConnStats

	lastMessageReceived     time.Time
	completedHandshake      time.Time
	lastUsefulChunkReceived time.Time
	lastChunkSent           time.Time

	// Stuff controlled by the local peer.
	Interested       bool
	Choked           bool
	requests         map[request]struct{}
	requestsLowWater int
	// Indexed by metadata piece, set to true if posted and pending a
	// response.
	metadataRequests []bool
	sentHaves        []bool

	// Stuff controlled by the remote peer.
	PeerID             [20]byte
	PeerInterested     bool
	PeerChoked         bool
	PeerRequests       map[request]struct{}
	PeerExtensionBytes peerExtensionBytes
	// The pieces the peer has claimed to have.
	peerPieces bitmap.Bitmap
	// The peer has everything. This can occur due to a special message, when
	// we may not even know the number of pieces in the torrent yet.
	peerSentHaveAll bool
	// The highest possible number of pieces the torrent could have based on
	// communication with the peer. Generally only useful until we have the
	// Info.
	peerMinPieces int
	// Pieces we've accepted chunks for from the peer.
	peerTouchedPieces map[int]struct{}

	PeerMaxRequests  int // Maximum pending requests the peer allows.
	PeerExtensionIDs map[string]byte
	PeerClientName   string

	pieceInclination  []int
	pieceRequestOrder prioritybitmap.PriorityBitmap

	postedBuffer bytes.Buffer
	uploadTimer  *time.Timer
	writerCond   sync.Cond
}
func (cn *connection) peerHasAllPieces() (all bool, known bool) {
	if cn.peerSentHaveAll {
		return true, true
	}
	if !cn.t.haveInfo() {
		return false, false
	}
	return bitmap.Flip(cn.peerPieces, 0, cn.t.numPieces()).IsEmpty(), true
}
func (cn *connection) mu() sync.Locker {
	return &cn.t.cl.mu
}
func (cn *connection) remoteAddr() net.Addr {
	return cn.conn.RemoteAddr()
}

func (cn *connection) localAddr() net.Addr {
	return cn.conn.LocalAddr()
}

func (cn *connection) supportsExtension(ext string) bool {
	_, ok := cn.PeerExtensionIDs[ext]
	return ok
}
// The best guess at number of pieces in the torrent for this peer.
func (cn *connection) bestPeerNumPieces() int {
	if cn.t.haveInfo() {
		return cn.t.numPieces()
	}
	return cn.peerMinPieces
}

func (cn *connection) completedString() string {
	return fmt.Sprintf("%d/%d", cn.peerPieces.Len(), cn.bestPeerNumPieces())
}
// Correct the PeerPieces slice length. Returns an error if the existing
// slice is invalid, such as from receiving a badly sized BITFIELD or an
// invalid HAVE message.
func (cn *connection) setNumPieces(num int) error {
	cn.peerPieces.RemoveRange(num, -1)
	cn.peerPiecesChanged()
	return nil
}
func eventAgeString(t time.Time) string {
	if t.IsZero() {
		return "never"
	}
	return fmt.Sprintf("%.2fs ago", time.Since(t).Seconds())
}
func (cn *connection) connectionFlags() (ret string) {
	c := func(b byte) {
		ret += string([]byte{b})
	}
	if cn.cryptoMethod == mse.CryptoMethodRC4 {
		c('E')
	} else if cn.headerEncrypted {
		c('e')
	}
	ret += string(cn.Discovery)
	return
}
// Inspired by https://trac.transmissionbt.com/wiki/PeerStatusText
func (cn *connection) statusFlags() (ret string) {
	c := func(b byte) {
		ret += string([]byte{b})
	}
	if cn.Interested {
		c('i')
	}
	if cn.Choked {
		c('c')
	}
	c('-')
	ret += cn.connectionFlags()
	c('-')
	if cn.PeerInterested {
		c('i')
	}
	if cn.PeerChoked {
		c('c')
	}
	return
}
func (cn *connection) String() string {
	var buf bytes.Buffer
	cn.WriteStatus(&buf, nil)
	return buf.String()
}
func (cn *connection) WriteStatus(w io.Writer, t *Torrent) {
	// \t isn't preserved in <pre> blocks?
	fmt.Fprintf(w, "%-40s: %s-%s\n", cn.PeerID, cn.localAddr(), cn.remoteAddr())
	fmt.Fprintf(w, "    last msg: %s, connected: %s, last helpful: %s\n",
		eventAgeString(cn.lastMessageReceived),
		eventAgeString(cn.completedHandshake),
		eventAgeString(cn.lastHelpful()))
	fmt.Fprintf(w,
		"    %s completed, %d pieces touched, good chunks: %d/%d-%d reqq: %d-%d, flags: %s\n",
		cn.completedString(),
		len(cn.peerTouchedPieces),
		cn.stats.ChunksReadUseful,
		cn.stats.ChunksReadUnwanted+cn.stats.ChunksReadUseful,
		cn.stats.ChunksWritten,
		cn.numLocalRequests(),
		len(cn.PeerRequests),
		cn.statusFlags(),
	)
	roi := cn.pieceRequestOrderIter()
	fmt.Fprintf(w, "    next pieces: %v%s\n",
		iter.ToSlice(iter.Head(10, roi)),
		func() string {
			if cn.shouldRequestWithoutBias() {
				return " (fastest)"
			}
			return ""
		}(),
	)
}
func (cn *connection) Close() {
	if !cn.closed.Set() {
		return
	}
	cn.discardPieceInclination()
	cn.pieceRequestOrder.Clear()
	if cn.conn != nil {
		go cn.conn.Close()
	}
}
func (cn *connection) PeerHasPiece(piece int) bool {
	return cn.peerSentHaveAll || cn.peerPieces.Contains(piece)
}
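// Post buffers a message for the writer routine to send to the peer, and
// wakes the writer.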
func (cn *connection) Post(msg pp.Message) {
	messageTypesPosted.Add(strconv.FormatInt(int64(msg.Type), 10), 1)
	cn.postedBuffer.Write(msg.MustMarshalBinary())
	cn.tickleWriter()
}
func (cn *connection) requestMetadataPiece(index int) {
	eID := cn.PeerExtensionIDs["ut_metadata"]
	if eID == 0 {
		return
	}
	if index < len(cn.metadataRequests) && cn.metadataRequests[index] {
		return
	}
	cn.Post(pp.Message{
		Type:       pp.Extended,
		ExtendedID: eID,
		ExtendedPayload: func() []byte {
			b, err := bencode.Marshal(map[string]int{
				"msg_type": pp.RequestMetadataExtensionMsgType,
				"piece":    index,
			})
			if err != nil {
				panic(err)
			}
			return b
		}(),
	})
	for index >= len(cn.metadataRequests) {
		cn.metadataRequests = append(cn.metadataRequests, false)
	}
	cn.metadataRequests[index] = true
}
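// For reference, the extended payload marshalled above is plain bencode:
// requesting metadata piece 1 yields "d8:msg_typei0e5:piecei1ee", where
// msg_type 0 is a request per BEP 9.
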
func (cn *connection) requestedMetadataPiece(index int) bool {
	return index < len(cn.metadataRequests) && cn.metadataRequests[index]
}
// The actual value to use as the maximum outbound requests.
func (cn *connection) nominalMaxRequests() (ret int) {
	ret = cn.PeerMaxRequests
	if ret > 64 {
		ret = 64
	}
	return
}
// Returns true if an unsatisfied request was canceled.
func (cn *connection) PeerCancel(r request) bool {
	if cn.PeerRequests == nil {
		return false
	}
	if _, ok := cn.PeerRequests[r]; !ok {
		return false
	}
	delete(cn.PeerRequests, r)
	return true
}
func (cn *connection) Choke(msg func(pp.Message) bool) bool {
	if cn.Choked {
		return true
	}
	cn.PeerRequests = nil
	cn.Choked = true
	return msg(pp.Message{
		Type: pp.Choke,
	})
}
func (cn *connection) Unchoke(msg func(pp.Message) bool) bool {
	if !cn.Choked {
		return true
	}
	cn.Choked = false
	return msg(pp.Message{
		Type: pp.Unchoke,
	})
}
func (cn *connection) SetInterested(interested bool, msg func(pp.Message) bool) bool {
	if cn.Interested == interested {
		return true
	}
	cn.Interested = interested
	// log.Printf("%p: setting interest: %v", cn, interested)
	return msg(pp.Message{
		Type: func() pp.MessageType {
			if interested {
				return pp.Interested
			}
			return pp.NotInterested
		}(),
	})
}
// The function takes a message to be sent, and returns true if more messages
// are okay.
type messageWriter func(pp.Message) bool
// Proxies the messageWriter's response.
func (cn *connection) request(r request, mw messageWriter) bool {
	if cn.requests == nil {
		cn.requests = make(map[request]struct{}, cn.nominalMaxRequests())
	}
	if _, ok := cn.requests[r]; ok {
		panic("chunk already requested")
	}
	if !cn.PeerHasPiece(r.Index.Int()) {
		panic("requesting piece peer doesn't have")
	}
	cn.requests[r] = struct{}{}
	if _, ok := cn.t.conns[cn]; !ok {
		panic("requesting but not in active conns")
	}
	cn.t.pendingRequests[r]++
	return mw(pp.Message{
		Type:   pp.Request,
		Index:  r.Index,
		Begin:  r.Begin,
		Length: r.Length,
	})
}
func (cn *connection) fillWriteBuffer(msg func(pp.Message) bool) {
	numFillBuffers.Add(1)
	cancel, new, i := cn.desiredRequestState()
	if !cn.SetInterested(i, msg) {
		return
	}
	if cancel && len(cn.requests) != 0 {
		fillBufferSentCancels.Add(1)
		for r := range cn.requests {
			cn.deleteRequest(r)
			// log.Printf("%p: cancelling request: %v", cn, r)
			if !msg(makeCancelMessage(r)) {
				return
			}
		}
	}
	if len(new) != 0 {
		fillBufferSentRequests.Add(1)
		for _, r := range new {
			if !cn.request(r, msg) {
				// If we didn't completely top up the requests, we shouldn't
				// mark the low water, since we'll want to top up the requests
				// as soon as we have more write buffer space.
				return
			}
		}
		cn.requestsLowWater = len(cn.requests) / 2
	}
	cn.upload(msg)
}
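// The water marks above implement simple request pipelining: requests are
// topped up to nominalMaxRequests, and aren't topped up again until they
// drain below requestsLowWater (half the last top-up).
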
// Routine that writes to the peer. Some of what to write is buffered by
// activity elsewhere in the Client, and some is determined locally when the
// connection is writable.
func (cn *connection) writer(keepAliveTimeout time.Duration) {
	var (
		buf       bytes.Buffer
		lastWrite time.Time = time.Now()
	)
	var keepAliveTimer *time.Timer
	keepAliveTimer = time.AfterFunc(keepAliveTimeout, func() {
		cn.mu().Lock()
		defer cn.mu().Unlock()
		if time.Since(lastWrite) >= keepAliveTimeout {
			cn.tickleWriter()
		}
		keepAliveTimer.Reset(keepAliveTimeout)
	})
	cn.mu().Lock()
	defer cn.mu().Unlock()
	defer cn.Close()
	defer keepAliveTimer.Stop()
	for {
		if cn.closed.IsSet() {
			return
		}
		buf.Write(cn.postedBuffer.Bytes())
		cn.postedBuffer.Reset()
		if buf.Len() == 0 {
			cn.fillWriteBuffer(func(msg pp.Message) bool {
				cn.wroteMsg(&msg)
				buf.Write(msg.MustMarshalBinary())
				return buf.Len() < 1<<16
			})
		}
		if buf.Len() == 0 && time.Since(lastWrite) >= keepAliveTimeout {
			buf.Write(pp.Message{Keepalive: true}.MustMarshalBinary())
			postedKeepalives.Add(1)
		}
		if buf.Len() == 0 {
			// TODO: Minimize wakeups.
			cn.writerCond.Wait()
			continue
		}
		cn.mu().Unlock()
		// log.Printf("writing %d bytes", buf.Len())
		n, err := cn.w.Write(buf.Bytes())
		cn.mu().Lock()
		if n != 0 {
			lastWrite = time.Now()
			keepAliveTimer.Reset(keepAliveTimeout)
		}
		if err != nil {
			return
		}
		if n != buf.Len() {
			panic("short write")
		}
		buf.Reset()
	}
}
func (cn *connection) Have(piece int) {
	for piece >= len(cn.sentHaves) {
		cn.sentHaves = append(cn.sentHaves, false)
	}
	if cn.sentHaves[piece] {
		return
	}
	cn.Post(pp.Message{
		Type:  pp.Have,
		Index: pp.Integer(piece),
	})
	cn.sentHaves[piece] = true
}
func (cn *connection) Bitfield(haves []bool) {
	if cn.sentHaves != nil {
		panic("bitfield must be first have-related message sent")
	}
	cn.Post(pp.Message{
		Type:     pp.Bitfield,
		Bitfield: haves,
	})
	// Make a copy of haves, as that's read when the message is marshalled
	// without the lock. Also it obviously shouldn't change in the Msg due to
	// changes in .sentHaves.
	cn.sentHaves = append([]bool(nil), haves...)
}
// Determines interest and requests to send to a connected peer.
func nextRequestState(
	networkingEnabled bool,
	currentRequests map[request]struct{},
	peerChoking bool,
	iterPendingRequests func(f func(request) bool),
	requestsLowWater int,
	requestsHighWater int,
) (
	cancelExisting bool, // Cancel all our pending requests
	newRequests []request, // Chunks to request that we currently aren't
	interested bool, // Whether we should indicate interest, even if we don't request anything
) {
	if !networkingEnabled {
		return true, nil, false
	}
	if len(currentRequests) > requestsLowWater {
		return false, nil, true
	}
	iterPendingRequests(func(r request) bool {
		interested = true
		if peerChoking {
			return false
		}
		if _, ok := currentRequests[r]; !ok {
			if newRequests == nil {
				newRequests = make([]request, 0, requestsHighWater-len(currentRequests))
			}
			newRequests = append(newRequests, r)
		}
		return len(currentRequests)+len(newRequests) < requestsHighWater
	})
	return
}
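// A minimal sketch of exercising the pure nextRequestState function in
// isolation, e.g. from a test. The function name and inputs here are
// hypothetical, for illustration only.
func exampleNextRequestState() {
	pending := []request{
		newRequest(0, 0, 0x4000),
		newRequest(0, 0x4000, 0x4000),
	}
	cancel, newReqs, interested := nextRequestState(
		true,  // networking enabled
		nil,   // no requests outstanding yet
		false, // peer is not choking us
		func(f func(request) bool) {
			for _, r := range pending {
				if !f(r) {
					return
				}
			}
		},
		16, // requestsLowWater
		64, // requestsHighWater
	)
	// With no current requests and an unchoked peer, expect cancel == false,
	// both pending chunks in newReqs, and interested == true.
	log.Println(cancel, newReqs, interested)
}
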
func (cn *connection) updateRequests() {
	// log.Print("update requests")
	cn.tickleWriter()
}
// Emits the indices in the Bitmaps bms in order, never repeating any index.
// skip is mutated during execution, and its initial values will never be
// emitted.
func iterBitmapsDistinct(skip bitmap.Bitmap, bms ...bitmap.Bitmap) iter.Func {
	return func(cb iter.Callback) {
		for _, bm := range bms {
			if !iter.All(func(i interface{}) bool {
				skip.Add(i.(int))
				return cb(i)
			}, bitmap.Sub(bm, skip).Iter) {
				return
			}
		}
	}
}
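// A small usage sketch (hypothetical, for illustration): this emits 1, 2,
// then 3. Index 2 isn't repeated for the second bitmap, and the initial skip
// value 0 is never emitted.
func exampleIterBitmapsDistinct() {
	var skip, a, b bitmap.Bitmap
	skip.Add(0)
	a.Add(0)
	a.Add(1)
	a.Add(2)
	b.Add(2)
	b.Add(3)
	iterBitmapsDistinct(skip, a, b)(func(i interface{}) bool {
		log.Println(i)
		return true
	})
}
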
func (cn *connection) unbiasedPieceRequestOrder() iter.Func {
	now, readahead := cn.t.readerPiecePriorities()
	// Pieces to skip include pieces the peer doesn't have.
	skip := bitmap.Flip(cn.peerPieces, 0, cn.t.numPieces())
	// And pieces that we already have.
	skip.Union(cn.t.completedPieces)
	// Return an iterator over the different priority classes, minus the skip
	// pieces.
	return iter.Chain(
		iterBitmapsDistinct(skip, now, readahead),
		func(cb iter.Callback) {
			cn.t.pendingPieces.IterTyped(func(piece int) bool {
				if skip.Contains(piece) {
					return true
				}
				return cb(piece)
			})
		},
	)
}
// The connection should download highest priority pieces first, without any
// inclination toward avoiding wastage. Generally we might do this if there's
// a single connection, or this is the fastest connection, and we have active
// readers that signal an ordering preference. It's conceivable that the best
// connection should do this, since it's least likely to waste our time if
// assigned to the highest priority pieces, and assigning more than one this
// role would cause significant wasted bandwidth.
func (cn *connection) shouldRequestWithoutBias() bool {
	if cn.t.requestStrategy != 2 {
		return false
	}
	if len(cn.t.readers) == 0 {
		return false
	}
	if len(cn.t.conns) == 1 {
		return true
	}
	if cn == cn.t.fastestConn {
		return true
	}
	return false
}
func (cn *connection) pieceRequestOrderIter() iter.Func {
	if cn.shouldRequestWithoutBias() {
		return cn.unbiasedPieceRequestOrder()
	}
	return cn.pieceRequestOrder.Iter
}
func (cn *connection) iterPendingRequests(f func(request) bool) {
	cn.pieceRequestOrderIter()(func(_piece interface{}) bool {
		piece := _piece.(int)
		return iterUndirtiedChunks(piece, cn.t, func(cs chunkSpec) bool {
			r := request{pp.Integer(piece), cs}
			// log.Println(r, cn.t.pendingRequests[r], cn.requests)
			// if _, ok := cn.requests[r]; !ok && cn.t.pendingRequests[r] != 0 {
			// 	return true
			// }
			return f(r)
		})
	})
}
func (cn *connection) desiredRequestState() (bool, []request, bool) {
	return nextRequestState(
		cn.t.networkingEnabled,
		cn.requests,
		cn.PeerChoked,
		cn.iterPendingRequests,
		cn.requestsLowWater,
		cn.nominalMaxRequests(),
	)
}
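// Visits a piece's undirtied chunks in random order, so that distinct
// connections are unlikely to all request the same chunks of a piece first.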
func iterUndirtiedChunks(piece int, t *Torrent, f func(chunkSpec) bool) bool {
	chunkIndices := t.pieces[piece].undirtiedChunkIndices().ToSortedSlice()
	// TODO: Use "math/rand".Shuffle >= Go 1.10
	return iter.ForPerm(len(chunkIndices), func(i int) bool {
		return f(t.chunkIndexSpec(chunkIndices[i], piece))
	})
}
// TODO: Check that callers call updateRequests as required.
func (cn *connection) stopRequestingPiece(piece int) bool {
	return cn.pieceRequestOrder.Remove(piece)
}
// This is distinct from Torrent piece priority, which is the user's
// preference. Connection piece priority is specific to a connection and is
// used to pseudorandomly avoid connections always requesting the same pieces
// and thus wasting effort.
func (cn *connection) updatePiecePriority(piece int) bool {
	tpp := cn.t.piecePriority(piece)
	if !cn.PeerHasPiece(piece) {
		tpp = PiecePriorityNone
	}
	if tpp == PiecePriorityNone {
		return cn.stopRequestingPiece(piece)
	}
	prio := cn.getPieceInclination()[piece]
	switch cn.t.requestStrategy {
	case 1:
		switch tpp {
		case PiecePriorityNormal:
		case PiecePriorityReadahead:
			prio -= cn.t.numPieces()
		case PiecePriorityNext, PiecePriorityNow:
			prio -= 2 * cn.t.numPieces()
		default:
			panic(tpp)
		}
	default:
	}
	return cn.pieceRequestOrder.Set(piece, prio) || cn.shouldRequestWithoutBias()
}
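// The arithmetic above means that for requestStrategy 1 in a torrent of 100
// pieces, a piece with inclination 10 sorts at roughly 10 for Normal, -90
// for Readahead, and -190 for Next/Now priority; lower values are requested
// first in pieceRequestOrder.
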
func (cn *connection) getPieceInclination() []int {
	if cn.pieceInclination == nil {
		cn.pieceInclination = cn.t.getConnPieceInclination()
	}
	return cn.pieceInclination
}
func (cn *connection) discardPieceInclination() {
	if cn.pieceInclination == nil {
		return
	}
	cn.t.putPieceInclination(cn.pieceInclination)
	cn.pieceInclination = nil
}
func (cn *connection) peerPiecesChanged() {
	if cn.t.haveInfo() {
		prioritiesChanged := false
		for i := range iter.N(cn.t.numPieces()) {
			if cn.updatePiecePriority(i) {
				prioritiesChanged = true
			}
		}
		if prioritiesChanged {
			cn.updateRequests()
		}
	}
}
func (cn *connection) raisePeerMinPieces(newMin int) {
	if newMin > cn.peerMinPieces {
		cn.peerMinPieces = newMin
	}
}
func (cn *connection) peerSentHave(piece int) error {
	if cn.t.haveInfo() && piece >= cn.t.numPieces() || piece < 0 {
		return errors.New("invalid piece")
	}
	if cn.PeerHasPiece(piece) {
		return nil
	}
	cn.raisePeerMinPieces(piece + 1)
	cn.peerPieces.Set(piece, true)
	if cn.updatePiecePriority(piece) {
		cn.updateRequests()
	}
	return nil
}
func (cn *connection) peerSentBitfield(bf []bool) error {
	cn.peerSentHaveAll = false
	if len(bf)%8 != 0 {
		panic("expected bitfield length divisible by 8")
	}
	// We know that the last byte means that at most the last 7 bits are
	// wasted.
	cn.raisePeerMinPieces(len(bf) - 7)
	if cn.t.haveInfo() && len(bf) > cn.t.numPieces() {
		// Ignore known excess pieces.
		bf = bf[:cn.t.numPieces()]
	}
	for i, have := range bf {
		if have {
			cn.raisePeerMinPieces(i + 1)
		}
		cn.peerPieces.Set(i, have)
	}
	cn.peerPiecesChanged()
	return nil
}
func (cn *connection) onPeerSentHaveAll() error {
	cn.peerSentHaveAll = true
	cn.peerPieces.Clear()
	cn.peerPiecesChanged()
	return nil
}
func (cn *connection) peerSentHaveNone() error {
	cn.peerPieces.Clear()
	cn.peerSentHaveAll = false
	cn.peerPiecesChanged()
	return nil
}
func (c *connection) requestPendingMetadata() {
	if c.t.haveInfo() {
		return
	}
	if c.PeerExtensionIDs["ut_metadata"] == 0 {
		// Peer doesn't support this.
		return
	}
	// Request metadata pieces that we don't have in a random order.
	var pending []int
	for index := 0; index < c.t.metadataPieceCount(); index++ {
		if !c.t.haveMetadataPiece(index) && !c.requestedMetadataPiece(index) {
			pending = append(pending, index)
		}
	}
	for _, i := range rand.Perm(len(pending)) {
		c.requestMetadataPiece(pending[i])
	}
}
func (cn *connection) wroteMsg(msg *pp.Message) {
	messageTypesSent.Add(strconv.FormatInt(int64(msg.Type), 10), 1)
	cn.stats.wroteMsg(msg)
	cn.t.stats.wroteMsg(msg)
}

func (cn *connection) readMsg(msg *pp.Message) {
	cn.stats.readMsg(msg)
	cn.t.stats.readMsg(msg)
}

func (cn *connection) wroteBytes(n int64) {
	cn.stats.wroteBytes(n)
	if cn.t != nil {
		cn.t.stats.wroteBytes(n)
	}
}

func (cn *connection) readBytes(n int64) {
	cn.stats.readBytes(n)
	if cn.t != nil {
		cn.t.stats.readBytes(n)
	}
}
// Returns whether the connection could be useful to us. We're seeding and
// they want data, we don't have metainfo and they can provide it, etc.
func (c *connection) useful() bool {
	t := c.t
	if c.closed.IsSet() {
		return false
	}
	if !t.haveInfo() {
		return c.supportsExtension("ut_metadata")
	}
	if t.seeding() && c.PeerInterested {
		return true
	}
	if c.peerHasWantedPieces() {
		return true
	}
	return false
}
func (c *connection) lastHelpful() (ret time.Time) {
	ret = c.lastUsefulChunkReceived
	if c.t.seeding() && c.lastChunkSent.After(ret) {
		ret = c.lastChunkSent
	}
	return
}
// Processes incoming bittorrent messages. The client lock is held upon entry
// and exit. Returning will end the connection.
func (c *connection) mainReadLoop() error {
	t := c.t
	cl := t.cl

	decoder := pp.Decoder{
		R:         bufio.NewReaderSize(c.r, 1<<17),
		MaxLength: 256 * 1024,
		Pool:      t.chunkPool,
	}
	for {
		var (
			msg pp.Message
			err error
		)
		func() {
			cl.mu.Unlock()
			defer cl.mu.Lock()
			err = decoder.Decode(&msg)
		}()
		if cl.closed.IsSet() || c.closed.IsSet() || err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		c.readMsg(&msg)
		c.lastMessageReceived = time.Now()
		if msg.Keepalive {
			receivedKeepalives.Add(1)
			continue
		}
		messageTypesReceived.Add(strconv.FormatInt(int64(msg.Type), 10), 1)
		switch msg.Type {
		case pp.Choke:
			c.PeerChoked = true
			c.deleteAllRequests()
			// We can then reset our interest.
			c.updateRequests()
		case pp.Reject:
			if c.deleteRequest(newRequest(msg.Index, msg.Begin, msg.Length)) {
				c.updateRequests()
			}
		case pp.Unchoke:
			c.PeerChoked = false
			c.tickleWriter()
		case pp.Interested:
			c.PeerInterested = true
			c.tickleWriter()
		case pp.NotInterested:
			c.PeerInterested = false
		case pp.Have:
			err = c.peerSentHave(int(msg.Index))
		case pp.Request:
			requestedChunkLengths.Add(strconv.FormatUint(msg.Length.Uint64(), 10), 1)
			if len(c.PeerRequests) >= maxRequests {
				// TODO: Should we drop them or Choke them instead?
				break
			}
			if !t.havePiece(msg.Index.Int()) {
				// This isn't necessarily them screwing up. We can drop pieces
				// from our storage, and can't communicate this to peers
				// except by reconnecting.
				requestsReceivedForMissingPieces.Add(1)
				err = fmt.Errorf("peer requested piece we don't have: %v", msg.Index.Int())
				break
			}
			if c.PeerRequests == nil {
				c.PeerRequests = make(map[request]struct{}, maxRequests)
			}
			c.PeerRequests[newRequest(msg.Index, msg.Begin, msg.Length)] = struct{}{}
			c.tickleWriter()
		case pp.Cancel:
			req := newRequest(msg.Index, msg.Begin, msg.Length)
			if !c.PeerCancel(req) {
				unexpectedCancels.Add(1)
			}
		case pp.Bitfield:
			err = c.peerSentBitfield(msg.Bitfield)
		case pp.HaveAll:
			err = c.onPeerSentHaveAll()
		case pp.HaveNone:
			err = c.peerSentHaveNone()
		case pp.Piece:
			c.receiveChunk(&msg)
			if len(msg.Piece) == int(t.chunkSize) {
				t.chunkPool.Put(&msg.Piece)
			}
		case pp.Extended:
			switch msg.ExtendedID {
			case pp.HandshakeExtendedID:
				// TODO: Create a bencode struct for this.
				var d map[string]interface{}
				err = bencode.Unmarshal(msg.ExtendedPayload, &d)
				if err != nil {
					err = fmt.Errorf("error decoding extended message payload: %s", err)
					break
				}
				// log.Printf("got handshake from %q: %#v", c.Socket.RemoteAddr().String(), d)
				if reqq, ok := d["reqq"]; ok {
					if i, ok := reqq.(int64); ok {
						c.PeerMaxRequests = int(i)
					}
				}
				if v, ok := d["v"]; ok {
					c.PeerClientName = v.(string)
				}
				if m, ok := d["m"]; ok {
					mTyped, ok := m.(map[string]interface{})
					if !ok {
						err = errors.New("handshake m value is not dict")
						break
					}
					if c.PeerExtensionIDs == nil {
						c.PeerExtensionIDs = make(map[string]byte, len(mTyped))
					}
					for name, v := range mTyped {
						id, ok := v.(int64)
						if !ok {
							log.Printf("bad handshake m item extension ID type: %T", v)
							continue
						}
						if id == 0 {
							delete(c.PeerExtensionIDs, name)
						} else {
							if c.PeerExtensionIDs[name] == 0 {
								supportedExtensionMessages.Add(name, 1)
							}
							c.PeerExtensionIDs[name] = byte(id)
						}
					}
				}
				if metadata_sizeUntyped, ok := d["metadata_size"]; ok {
					metadata_size, ok := metadata_sizeUntyped.(int64)
					if !ok {
						log.Printf("bad metadata_size type: %T", metadata_sizeUntyped)
					} else if err = t.setMetadataSize(metadata_size); err != nil {
						err = fmt.Errorf("error setting metadata size to %d", metadata_size)
						break
					}
				}
				if _, ok := c.PeerExtensionIDs["ut_metadata"]; ok {
					c.requestPendingMetadata()
				}
			case metadataExtendedId:
				err = cl.gotMetadataExtensionMsg(msg.ExtendedPayload, t, c)
				if err != nil {
					err = fmt.Errorf("error handling metadata extension message: %s", err)
				}
			case pexExtendedId:
				if cl.config.DisablePEX {
					break
				}
				var pexMsg peerExchangeMessage
				err = bencode.Unmarshal(msg.ExtendedPayload, &pexMsg)
				if err != nil {
					err = fmt.Errorf("error unmarshalling PEX message: %s", err)
					break
				}
				t.addPeers(func() (ret []Peer) {
					for i, cp := range pexMsg.Added {
						p := Peer{
							IP:     make(net.IP, 4),
							Port:   cp.Port,
							Source: peerSourcePEX,
						}
						if i < len(pexMsg.AddedFlags) && pexMsg.AddedFlags[i]&0x01 != 0 {
							p.SupportsEncryption = true
						}
						missinggo.CopyExact(p.IP, cp.IP[:])
						ret = append(ret, p)
					}
					return
				}())
			default:
				err = fmt.Errorf("unexpected extended message ID: %v", msg.ExtendedID)
			}
			if err != nil {
				// That client uses its own extension IDs for outgoing message
				// types, which is incorrect.
				if bytes.HasPrefix(c.PeerID[:], []byte("-SD0100-")) ||
					strings.HasPrefix(string(c.PeerID[:]), "-XL0012-") {
					return nil
				}
			}
		case pp.Port:
			if cl.dHT == nil {
				break
			}
			pingAddr, err := net.ResolveUDPAddr("", c.remoteAddr().String())
			if err != nil {
				panic(err)
			}
			if msg.Port != 0 {
				pingAddr.Port = int(msg.Port)
			}
			go cl.dHT.Ping(pingAddr, nil)
		default:
			err = fmt.Errorf("received unknown message type: %#v", msg.Type)
		}
		if err != nil {
			return err
		}
	}
}
// Set both the Reader and Writer for the connection from a single ReadWriter.
func (cn *connection) setRW(rw io.ReadWriter) {
	cn.r = rw
	cn.w = rw
}

// Returns the Reader and Writer as a combined ReadWriter.
func (cn *connection) rw() io.ReadWriter {
	return struct {
		io.Reader
		io.Writer
	}{cn.r, cn.w}
}
// Handle a received chunk from a peer.
func (c *connection) receiveChunk(msg *pp.Message) {
	t := c.t
	cl := t.cl
	chunksReceived.Add(1)

	req := newRequest(msg.Index, msg.Begin, pp.Integer(len(msg.Piece)))

	// Request has been satisfied.
	if c.deleteRequest(req) {
		c.updateRequests()
	} else {
		unexpectedChunksReceived.Add(1)
	}

	// Do we actually want this chunk?
	if !t.wantPiece(req) {
		unwantedChunksReceived.Add(1)
		c.stats.ChunksReadUnwanted++
		return
	}

	index := int(req.Index)
	piece := &t.pieces[index]

	c.stats.ChunksReadUseful++
	c.lastUsefulChunkReceived = time.Now()
	// if t.fastestConn != c {
	// 	log.Printf("setting fastest connection %p", c)
	// }
	t.fastestConn = c

	// Need to record that it hasn't been written yet, before we attempt to do
	// anything with it.
	piece.incrementPendingWrites()
	// Record that we have the chunk, so we aren't trying to download it while
	// waiting for it to be written to storage.
	piece.unpendChunkIndex(chunkIndex(req.chunkSpec, t.chunkSize))

	// Cancel pending requests for this chunk.
	for c := range t.conns {
		c.postCancel(req)
	}

	err := func() error {
		cl.mu.Unlock()
		defer cl.mu.Lock()
		// Write the chunk out. Note that the upper bound on chunk writing
		// concurrency will be the number of connections. We write inline with
		// receiving the chunk (with this lock dance), because we want to
		// handle errors synchronously and I haven't thought of a nice way to
		// defer any concurrency to the storage and have that notify the
		// client of errors. TODO: Do that instead.
		return t.writeChunk(int(msg.Index), int64(msg.Begin), msg.Piece)
	}()

	piece.decrementPendingWrites()

	if err != nil {
		log.Printf("%s (%s): error writing chunk %v: %s", t, t.infoHash, req, err)
		t.updatePieceCompletion(int(msg.Index))
		return
	}

	// It's important that the piece is potentially queued before we check if
	// the piece is still wanted, because if it is queued, it won't be wanted.
	if t.pieceAllDirty(index) {
		t.queuePieceCheck(int(req.Index))
		t.pendAllChunkSpecs(index)
	}

	if c.peerTouchedPieces == nil {
		c.peerTouchedPieces = make(map[int]struct{})
	}
	c.peerTouchedPieces[index] = struct{}{}

	cl.event.Broadcast()
	t.publishPieceChange(int(req.Index))
}
func (c *connection) uploadAllowed() bool {
	if c.t.cl.config.NoUpload {
		return false
	}
	if c.t.seeding() {
		return true
	}
	if !c.peerHasWantedPieces() {
		return false
	}
	// Don't upload more than 100 KiB more than we download.
	if c.stats.DataBytesWritten >= c.stats.DataBytesRead+100<<10 {
		return false
	}
	return true
}
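// That is, while leeching we tolerate at most 100 KiB (100<<10 bytes) of
// upload beyond what the peer has sent us before choking them.
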
func (c *connection) setRetryUploadTimer(delay time.Duration) {
	if c.uploadTimer == nil {
		c.uploadTimer = time.AfterFunc(delay, c.writerCond.Broadcast)
	} else {
		c.uploadTimer.Reset(delay)
	}
}
// Also handles choking and unchoking of the remote peer.
func (c *connection) upload(msg func(pp.Message) bool) bool {
	// Breaking or completing this loop means we don't want to upload to the
	// peer anymore, and we choke them.
another:
	for c.uploadAllowed() {
		// We want to upload to the peer.
		if !c.Unchoke(msg) {
			return false
		}
		for r := range c.PeerRequests {
			res := c.t.cl.uploadLimit.ReserveN(time.Now(), int(r.Length))
			if !res.OK() {
				panic(fmt.Sprintf("upload rate limiter burst size < %d", r.Length))
			}
			delay := res.Delay()
			if delay > 0 {
				res.Cancel()
				c.setRetryUploadTimer(delay)
				// Hard to say what to return here.
				return true
			}
			more, err := c.sendChunk(r, msg)
			if err != nil {
				i := int(r.Index)
				if c.t.pieceComplete(i) {
					c.t.updatePieceCompletion(i)
					if !c.t.pieceComplete(i) {
						// We had the piece, but not anymore.
						break another
					}
				}
				log.Printf("error sending chunk %+v to peer: %s", r, err)
				// If we failed to send a chunk, choke the peer to ensure they
				// flush all their requests. We've probably dropped a piece,
				// but there's no way to communicate this to the peer. If they
				// ask for it again, we'll kick them to allow us to send them
				// an updated bitfield.
				break another
			}
			delete(c.PeerRequests, r)
			if !more {
				return false
			}
			goto another
		}
		return true
	}
	return c.Choke(msg)
}
func (cn *connection) Drop() {
	cn.t.dropConnection(cn)
}
func (cn *connection) netGoodPiecesDirtied() int64 {
	return cn.stats.GoodPiecesDirtied - cn.stats.BadPiecesDirtied
}
func (c *connection) peerHasWantedPieces() bool {
	return !c.pieceRequestOrder.IsEmpty()
}
func (c *connection) numLocalRequests() int {
	return len(c.requests)
}
func (c *connection) deleteRequest(r request) bool {
	if _, ok := c.requests[r]; !ok {
		return false
	}
	delete(c.requests, r)
	c.t.pendingRequests[r]--
	return true
}
func (c *connection) deleteAllRequests() {
	for r := range c.requests {
		c.deleteRequest(r)
	}
	// for c := range c.t.conns {
	// 	c.updateRequests()
	// }
}
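// Wakes the writer goroutine if it's blocked waiting for something to send.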
func (c *connection) tickleWriter() {
	c.writerCond.Broadcast()
}
func (c *connection) postCancel(r request) bool {
	if !c.deleteRequest(r) {
		return false
	}
	c.Post(makeCancelMessage(r))
	return true
}
func (c *connection) sendChunk(r request, msg func(pp.Message) bool) (more bool, err error) {
	// Count the chunk being sent, even if it isn't.
	b := make([]byte, r.Length)
	p := c.t.info.Piece(int(r.Index))
	n, err := c.t.readAt(b, p.Offset()+int64(r.Begin))
	if n != len(b) {
		if err == nil {
			panic("expected error")
		}
		return
	} else if err == io.EOF {
		err = nil
	}
	more = msg(pp.Message{
		Type:  pp.Piece,
		Index: r.Index,
		Begin: r.Begin,
		Piece: b,
	})
	uploadChunksPosted.Add(1)
	c.lastChunkSent = time.Now()
	return
}