15 "github.com/anacrolix/libtorgo/bencode"
17 "github.com/anacrolix/torrent/internal/pieceordering"
18 pp "github.com/anacrolix/torrent/peer_protocol"
// Exported expvar counter: incremented when an outgoing Cancel is matched
// against a still-queued Request and both are dropped before reaching the
// wire (see writeOptimizer).
21 var optimizedCancels = expvar.NewInt("optimizedCancels")
// Peer-source tag byte: 'I' marks a connection we accepted (incoming).
26 peerSourceIncoming = 'I'
31 // Maintains the state of a connection with a peer.
32 type connection struct {
// Underlying transport; may wrap the raw net.Conn (e.g. for rate limiting
// or encryption) — confirm against the constructor.
34 rw io.ReadWriter // The real slim shady
39 mu sync.Mutex // Only for closing.
43 // The connections preferred order to download pieces.
45 // The piece request order based on piece priorities.
46 pieceRequestOrder *pieceordering.Instance
// Chunk accounting, shown in WriteStatus: chunks we didn't want vs. chunks
// that were useful when received.
48 UnwantedChunksReceived int
49 UsefulChunksReceived int
// Event timestamps rendered by WriteStatus via eventAgeString.
51 lastMessageReceived time.Time
52 completedHandshake time.Time
53 lastUsefulChunkReceived time.Time
55 // Stuff controlled by the local peer.
// Outstanding chunk requests we have sent; lazily allocated in Request,
// capped at PeerMaxRequests.
58 Requests map[request]struct{}
60 // Indexed by metadata piece, set to true if posted and pending a
62 metadataRequests []bool
64 // Stuff controlled by the remote peer.
// Chunk requests the remote peer has sent us; removed by PeerCancel.
68 PeerRequests map[request]struct{}
69 PeerExtensionBytes peerExtensionBytes
70 // Whether the peer has the given piece. nil if they've not sent any
71 // related messages yet.
75 PeerMaxRequests int // Maximum pending requests the peer allows.
// Maps extension name (e.g. "ut_metadata") to the message ID the peer
// assigned in its extended handshake (BEP 10).
76 PeerExtensionIDs map[string]byte
// newConnection allocates a connection with its signalling/IO channels
// initialized: closing is closed by Close, writeCh feeds writer, and post
// feeds writeOptimizer.
80 func newConnection() (c *connection) {
86 closing: make(chan struct{}),
87 writeCh: make(chan []byte),
88 post: make(chan pp.Message),
// remoteAddr returns the remote endpoint of the underlying net.Conn.
93 func (cn *connection) remoteAddr() net.Addr {
94 return cn.conn.RemoteAddr()
// localAddr returns the local endpoint of the underlying net.Conn.
97 func (cn *connection) localAddr() net.Addr {
98 return cn.conn.LocalAddr()
101 // Adjust piece position in the request order for this connection based on the
102 // given piece priority.
103 func (cn *connection) pendPiece(piece int, priority piecePriority) {
// piecePriorityNone means the piece is no longer wanted: drop it from the
// per-connection request order entirely.
104 if priority == piecePriorityNone {
105 cn.pieceRequestOrder.DeletePiece(piece)
// pp is this connection's random per-piece tiebreaker (0..n-1),
// presumably assigned when the connection learns the piece count — confirm.
108 pp := cn.piecePriorities[piece]
109 // Priority regions not to scale. Within each region, piece is randomized
110 // according to connection.
// Key layout: each priority tier occupies a disjoint negative band, with
// the random tiebreaker scaled so bands cannot overlap. More negative =
// requested sooner. Normal priority (lowest) falls through below.
118 case piecePriorityNow:
119 return -3*len(cn.piecePriorities) + 3*pp
120 case piecePriorityNext:
121 return -2*len(cn.piecePriorities) + 2*pp
122 case piecePriorityReadahead:
123 return -len(cn.piecePriorities) + pp
124 case piecePriorityNormal:
// Insert or reposition the piece under the computed sort key.
130 cn.pieceRequestOrder.SetPiece(piece, key)
// supportsExtension reports whether the peer advertised the named BEP 10
// extension in its extended handshake.
133 func (cn *connection) supportsExtension(ext string) bool {
134 _, ok := cn.PeerExtensionIDs[ext]
// completedString renders the peer's completion as "have/total" pieces.
// Special cases: no bitfield and no have-all seen yet, or peerHasAll —
// handled by the branches below.
138 func (cn *connection) completedString(t *torrent) string {
139 if cn.PeerPieces == nil && !cn.peerHasAll {
142 return fmt.Sprintf("%d/%d", func() int {
// Count set bits in the peer's piece bitmap for the numerator.
150 for _, b := range cn.PeerPieces {
// Denominator: if the peer has everything (or sent no bitfield), fall back
// to the torrent's piece count (elided here); otherwise the bitmap length.
157 if cn.peerHasAll || cn.PeerPieces == nil {
163 return len(cn.PeerPieces)
// NOTE(review): the comment below says "Return false" but the function
// returns an error — the wording predates the signature; update when the
// full body is in view.
167 // Correct the PeerPieces slice length. Return false if the existing slice is
168 // invalid, such as by receiving badly sized BITFIELD, or invalid HAVE
170 func (cn *connection) setNumPieces(num int) error {
// No bitfield received yet: nothing to reconcile.
174 if cn.PeerPieces == nil {
177 if len(cn.PeerPieces) == num {
// Shorter than expected: pad with false (pieces the peer hasn't claimed).
178 } else if len(cn.PeerPieces) < num {
179 cn.PeerPieces = append(cn.PeerPieces, make([]bool, num-len(cn.PeerPieces))...)
// Longer, but within bitfield byte-padding ((num+7)/8*8): the trailing
// spare bits must all be clear, else the peer claimed a nonexistent piece.
180 } else if len(cn.PeerPieces) <= (num+7)/8*8 {
181 for _, have := range cn.PeerPieces[num:] {
183 return errors.New("peer has invalid piece")
// Trim the padding bits once validated.
186 cn.PeerPieces = cn.PeerPieces[:num]
// Beyond any legal padding: the bitfield is simply the wrong size.
188 return fmt.Errorf("peer bitfield is excessively long: expected %d, have %d", num, len(cn.PeerPieces))
// Postcondition check: slice length must now match the piece count.
190 if len(cn.PeerPieces) != num {
// eventAgeString formats how long ago t occurred, e.g. "3.14s ago".
// The zero-time case is handled in the elided lines above the return.
// NOTE(review): time.Now().Sub(t) could be time.Since(t) — cosmetic only.
196 func eventAgeString(t time.Time) string {
200 return fmt.Sprintf("%.2fs ago", time.Now().Sub(t).Seconds())
203 // Inspired by https://trac.transmissionbt.com/wiki/PeerStatusText
// statusFlags builds a compact one-byte-per-state flag string for
// WriteStatus; c (elided) appends a single flag byte to ret.
204 func (cn *connection) statusFlags() (ret string) {
206 ret += string([]byte{b})
// Include the discovery-source byte (e.g. 'I' for incoming) when set.
218 if cn.Discovery != 0 {
219 c(byte(cn.Discovery))
225 if cn.PeerInterested {
// WriteStatus writes a multi-line human-readable summary of the connection
// (identity, event ages, chunk/request counters, flags) to w, for the
// status page.
234 func (cn *connection) WriteStatus(w io.Writer, t *torrent) {
235 // \t isn't preserved in <pre> blocks?
236 fmt.Fprintf(w, "%q: %s-%s\n", cn.PeerID, cn.localAddr(), cn.remoteAddr())
237 fmt.Fprintf(w, " last msg: %s, connected: %s, last useful chunk: %s\n",
238 eventAgeString(cn.lastMessageReceived),
239 eventAgeString(cn.completedHandshake),
240 eventAgeString(cn.lastUsefulChunkReceived))
241 fmt.Fprintf(w, " %s completed, good chunks: %d/%d reqs: %d-%d, flags: %s\n",
242 cn.completedString(t),
243 cn.UsefulChunksReceived,
// Denominator is total chunks received, wanted or not.
244 cn.UnwantedChunksReceived+cn.UsefulChunksReceived,
246 len(cn.PeerRequests),
// Close shuts the connection down; the elided body presumably closes the
// closing channel under c.mu and tears down the socket — confirm.
250 func (c *connection) Close() {
259 // TODO: This call blocks sometimes, why?
// PeerHasPiece reports whether the peer claims to have the given piece.
// Indices beyond the known bitmap are treated by the bounds check below;
// the peerHasAll fast path is in the elided lines.
263 func (c *connection) PeerHasPiece(piece int) bool {
267 if piece >= len(c.PeerPieces) {
270 return c.PeerPieces[piece]
// Post queues a protocol message for sending (fed to writeOptimizer via
// the post channel — body elided).
273 func (c *connection) Post(msg pp.Message) {
// RequestPending reports whether we have an outstanding request for r.
280 func (c *connection) RequestPending(r request) bool {
281 _, ok := c.Requests[r]
// requestMetadataPiece posts a ut_metadata (BEP 9) request for the given
// metadata piece, unless one is already pending; records it in
// c.metadataRequests so it isn't re-requested.
285 func (c *connection) requestMetadataPiece(index int) {
// The peer-assigned extended message ID for ut_metadata; the zero-ID
// (unsupported) case is presumably handled in the elided lines — confirm.
286 eID := c.PeerExtensionIDs["ut_metadata"]
// Already requested: nothing to do.
290 if index < len(c.metadataRequests) && c.metadataRequests[index] {
// Bencoded extension payload: {"msg_type": request, "piece": index}.
296 ExtendedPayload: func() []byte {
297 b, err := bencode.Marshal(map[string]int{
298 "msg_type": pp.RequestMetadataExtensionMsgType,
// Grow the pending-flags slice to cover index, then mark it requested.
307 for index >= len(c.metadataRequests) {
308 c.metadataRequests = append(c.metadataRequests, false)
310 c.metadataRequests[index] = true
// requestedMetadataPiece reports whether a request for the given metadata
// piece has already been posted.
313 func (c *connection) requestedMetadataPiece(index int) bool {
314 return index < len(c.metadataRequests) && c.metadataRequests[index]
317 // Returns true if more requests can be sent.
318 func (c *connection) Request(chunk request) bool {
// At the peer's advertised request limit: stop issuing.
319 if len(c.Requests) >= c.PeerMaxRequests {
// Don't request pieces the peer doesn't claim to have.
322 if !c.PeerHasPiece(int(chunk.Index)) {
// Duplicate request: skip (elided lines decide the return value).
325 if c.RequestPending(chunk) {
// We must be interested before the peer will serve requests.
328 c.SetInterested(true)
// Lazily allocate the pending-request set, sized to the peer's limit.
332 if c.Requests == nil {
333 c.Requests = make(map[request]struct{}, c.PeerMaxRequests)
335 c.Requests[chunk] = struct{}{}
// Low-water mark at half the current outstanding count, presumably the
// refill trigger used elsewhere — confirm against callers.
336 c.requestsLowWater = len(c.Requests) / 2
// The pp.Request message itself is posted in the elided lines.
341 Length: chunk.Length,
346 // Returns true if an unsatisfied request was canceled.
347 func (c *connection) Cancel(r request) bool {
// Nothing outstanding at all.
348 if c.Requests == nil {
// The specific request isn't pending.
351 if _, ok := c.Requests[r]; !ok {
// Drop it locally; the pp.Cancel message is posted in the elided lines.
354 delete(c.Requests, r)
364 // Returns true if an unsatisfied request was canceled.
// Mirror of Cancel, but for requests the remote peer made to us.
365 func (c *connection) PeerCancel(r request) bool {
366 if c.PeerRequests == nil {
369 if _, ok := c.PeerRequests[r]; !ok {
372 delete(c.PeerRequests, r)
// Choke posts a Choke message to the peer (body elided; presumably also
// sets c.Choked — confirm).
376 func (c *connection) Choke() {
// Unchoke posts an Unchoke message to the peer (body elided; presumably
// also clears c.Choked — confirm).
386 func (c *connection) Unchoke() {
// SetInterested posts Interested/NotInterested to match the desired state,
// doing nothing if we're already in it.
396 func (c *connection) SetInterested(interested bool) {
// No state change: avoid a redundant message.
397 if c.Interested == interested {
// Pick the message type from the target state.
401 Type: func() pp.MessageType {
405 return pp.NotInterested
// Record the new local interest state.
409 c.Interested = interested
412 // Writes buffers to the socket from the write channel.
// Long-running goroutine: drains conn.writeCh into a buffered writer so
// many small messages coalesce into fewer syscalls; flushing is driven by
// the notEmpty signal (select with flush case elided).
413 func (conn *connection) writer() {
414 // Reduce write syscalls.
415 buf := bufio.NewWriterSize(conn.rw, 0x8000) // 32 KiB
416 // Receives when buf is not empty.
417 notEmpty := make(chan struct{}, 1)
419 if buf.Buffered() != 0 {
420 // Make sure it's receivable.
// Non-blocking send: capacity-1 channel already holding a token is fine.
422 case notEmpty <- struct{}{}:
// writeCh closed (by writeOptimizer) means the connection is going down.
427 case b, ok := <-conn.writeCh:
431 _, err := buf.Write(b)
// writeOptimizer sits between Post (conn.post) and writer (conn.writeCh):
// it queues messages, lazily marshals the head of the queue, injects
// keep-alives after keepAliveDelay of write inactivity, and elides
// Request/Cancel pairs that never reached the wire.
447 func (conn *connection) writeOptimizer(keepAliveDelay time.Duration) {
448 defer close(conn.writeCh) // Responsible for notifying downstream routines.
449 pending := list.New() // Message queue.
450 var nextWrite []byte // Set to nil if we need to need to marshal the next message.
451 timer := time.NewTimer(keepAliveDelay)
453 lastWrite := time.Now()
455 write := conn.writeCh // Set to nil if there's nothing to write.
// Empty queue: disable the send case (write = nil, elided).
456 if pending.Len() == 0 {
// Head of queue not yet marshaled: do it once and cache the bytes.
458 } else if nextWrite == nil {
460 nextWrite, err = pending.Front().Value.(encoding.BinaryMarshaler).MarshalBinary()
// Timer fired: only emit a keep-alive if the queue is idle AND the last
// actual write is older than keepAliveDelay; otherwise re-arm for the
// remaining interval.
468 if pending.Len() != 0 {
471 keepAliveTime := lastWrite.Add(keepAliveDelay)
472 if time.Now().Before(keepAliveTime) {
473 timer.Reset(keepAliveTime.Sub(time.Now()))
476 pending.PushBack(pp.Message{Keepalive: true})
// New message posted; !ok means Post side closed, triggering shutdown.
477 case msg, ok := <-conn.post:
// Cancel optimization: if the matching Request is still queued (never
// sent), drop both instead of sending either, and count it.
481 if msg.Type == pp.Cancel {
482 for e := pending.Back(); e != nil; e = e.Prev() {
483 elemMsg := e.Value.(pp.Message)
484 if elemMsg.Type == pp.Request && msg.Index == elemMsg.Index && msg.Begin == elemMsg.Begin && msg.Length == elemMsg.Length {
486 optimizedCancels.Add(1)
491 pending.PushBack(msg)
// Head message handed to writer: pop it and restart the keep-alive clock.
492 case write <- nextWrite:
493 pending.Remove(pending.Front())
495 lastWrite = time.Now()
496 if pending.Len() == 0 {
497 timer.Reset(keepAliveDelay)