18 "github.com/anacrolix/dht"
19 "github.com/anacrolix/missinggo"
20 "github.com/anacrolix/missinggo/bitmap"
21 "github.com/anacrolix/missinggo/perf"
22 "github.com/anacrolix/missinggo/pubsub"
23 "github.com/anacrolix/missinggo/slices"
24 "github.com/bradfitz/iter"
26 "github.com/anacrolix/torrent/bencode"
27 "github.com/anacrolix/torrent/metainfo"
28 pp "github.com/anacrolix/torrent/peer_protocol"
29 "github.com/anacrolix/torrent/storage"
30 "github.com/anacrolix/torrent/tracker"

func (t *Torrent) chunkIndexSpec(chunkIndex, piece int) chunkSpec {
	return chunkIndexSpec(chunkIndex, t.pieceLength(piece), t.chunkSize)
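
// As a worked example (a sketch, assuming the conventional 16 KiB chunk size
// and a 256 KiB piece): chunk index 2 covers bytes [32768, 49152) of the
// piece, i.e. chunkSpec{Begin: 32768, Length: 16384}. The final chunk of a
// piece may be shorter when the piece length isn't a multiple of the chunk
// size.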

type peersKey struct {

// Maintains the state of a torrent within a Client.
type Torrent struct {
	networkingEnabled bool

	closed   missinggo.Event
	infoHash metainfo.Hash
	// Values are the piece indices that changed.
	pieceStateChanges *pubsub.PubSub
	// The size of chunks to request from peers over the wire. This is
	// normally 16KiB by convention these days.

	// Total length of the torrent in bytes. Stored because it's not O(1) to
	// get this from the info dict.

	// The storage to open when the info dict becomes available.
	storageOpener *storage.Client
	// Storage for torrent data.
	storage *storage.Torrent

	metainfo metainfo.MetaInfo

	// The info dict. nil if we don't have it (yet).

	// Active peer connections, running message stream loops.
	conns               map[*connection]struct{}
	maxEstablishedConns int
	// Set of addrs to which we're attempting to connect. Connections are
	// half-open until all handshakes are completed.
	halfOpen map[string]struct{}
	// Reserve of peers to connect to. A peer can be both here and in the
	// active connections if we're told about the peer after connecting with
	// them. That encourages us to reconnect to peers that are well known in
	// the swarm.
	peers          map[peersKey]Peer
	wantPeersEvent missinggo.Event
	// An announcer for each tracker URL.
	trackerAnnouncers map[string]*trackerScraper
	// How many times we've initiated a DHT announce. TODO: Move into stats.

	// Name used if the info name isn't available. Should be cleared when the
	// Info does become available.
	// The bencoded bytes of the info dict. This is actively manipulated if
	// the info bytes aren't initially available, and we try to fetch them
	// from peers.
	// Each element corresponds to a 16KiB metadata piece. If true, we have
	// received that piece.
	metadataCompletedChunks []bool

	// Set when .Info is obtained.
	gotMetainfo missinggo.Event

	readers               map[*Reader]struct{}
	readerNowPieces       bitmap.Bitmap
	readerReadaheadPieces bitmap.Bitmap

	// The indexes of pieces we want with normal priority, that aren't
	// currently available.
	pendingPieces bitmap.Bitmap
	// A cache of completed piece indices.
	completedPieces bitmap.Bitmap

	// A pool of piece priorities []int for assignment to new connections.
	// These "inclinations" are used to give connections preference for
	// different pieces.
	connPieceInclinationPool sync.Pool
	// Torrent-level statistics.

// Returns a channel that is closed when the Torrent is closed.
func (t *Torrent) Closed() <-chan struct{} {
	return t.closed.LockedChan(&t.cl.mu)

func (t *Torrent) setChunkSize(size pp.Integer) {
	t.chunkPool = &sync.Pool{
		New: func() interface{} {
			return make([]byte, size)
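
// A minimal usage sketch for the pool above (assumed; the call sites aren't
// shown in this excerpt): takers grab a chunk-sized buffer and return it once
// the chunk has been handed off, avoiding an allocation per received chunk.
//
//	buf := t.chunkPool.Get().([]byte)
//	// ... read the chunk payload into buf, pass it to storage ...
//	t.chunkPool.Put(buf)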

func (t *Torrent) setDisplayName(dn string) {

func (t *Torrent) pieceComplete(piece int) bool {
	return t.completedPieces.Get(piece)

func (t *Torrent) pieceCompleteUncached(piece int) bool {
	return t.pieces[piece].Storage().GetIsComplete()

// There's a connection to that address already.
func (t *Torrent) addrActive(addr string) bool {
	if _, ok := t.halfOpen[addr]; ok {
	for c := range t.conns {
		if c.remoteAddr().String() == addr {

func (t *Torrent) worstUnclosedConns() (ret []*connection) {
	ret = make([]*connection, 0, len(t.conns))
	for c := range t.conns {
		if !c.closed.IsSet() {

func (t *Torrent) addPeer(p Peer) {
	if len(t.peers) >= torrentPeersHighWater {
	key := peersKey{string(p.IP), p.Port}
	if _, ok := t.peers[key]; ok {
	peersAddedBySource.Add(string(p.Source), 1)

func (t *Torrent) invalidateMetadata() {
	for i := range t.metadataCompletedChunks {
		t.metadataCompletedChunks[i] = false

func (t *Torrent) saveMetadataPiece(index int, data []byte) {
	if index >= len(t.metadataCompletedChunks) {
		log.Printf("%s: ignoring metadata piece %d", t, index)
	copy(t.metadataBytes[(1<<14)*index:], data)
	t.metadataCompletedChunks[index] = true

func (t *Torrent) metadataPieceCount() int {
	return (len(t.metadataBytes) + (1 << 14) - 1) / (1 << 14)
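
// Worked example of the ceiling division above: metadata pieces are 16 KiB
// (1<<14 == 16384), so a 45000-byte info dict has (45000 + 16383) / 16384
// == 3 pieces: two full 16 KiB pieces and a final 12232-byte piece.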

func (t *Torrent) haveMetadataPiece(piece int) bool {
		return (1<<14)*piece < len(t.metadataBytes)
		return piece < len(t.metadataCompletedChunks) && t.metadataCompletedChunks[piece]

func (t *Torrent) metadataSizeKnown() bool {
	return t.metadataBytes != nil

func (t *Torrent) metadataSize() int {
	return len(t.metadataBytes)

func infoPieceHashes(info *metainfo.Info) (ret []string) {
	for i := 0; i < len(info.Pieces); i += sha1.Size {
		ret = append(ret, string(info.Pieces[i:i+sha1.Size]))

func (t *Torrent) makePieces() {
	hashes := infoPieceHashes(t.info)
	t.pieces = make([]piece, len(hashes))
	for i, hash := range hashes {
		piece := &t.pieces[i]
		piece.noPendingWrites.L = &piece.pendingWritesMutex
		missinggo.CopyExact(piece.Hash[:], hash)

// Called when metadata for a torrent becomes available.
func (t *Torrent) setInfoBytes(b []byte) error {
	if metainfo.HashBytes(b) != t.infoHash {
		return errors.New("info bytes have wrong hash")
	var info metainfo.Info
	err := bencode.Unmarshal(b, &info)
		return fmt.Errorf("error unmarshalling info bytes: %s", err)
	err = validateInfo(&info)
		return fmt.Errorf("bad info: %s", err)
	defer t.updateWantPeersEvent()
	t.displayName = "" // Save a few bytes lol.
	t.cl.event.Broadcast()
	t.storage, err = t.storageOpener.OpenTorrent(t.info, t.infoHash)
		return fmt.Errorf("error opening torrent storage: %s", err)
	for _, f := range t.info.UpvertedFiles() {
	t.metadataCompletedChunks = nil
	for conn := range t.conns {
		if err := conn.setNumPieces(t.numPieces()); err != nil {
			log.Printf("closing connection: %s", err)
	for i := range t.pieces {
		t.updatePieceCompletion(i)
		t.pieces[i].QueuedForHash = true
	for i := range t.pieces {

func (t *Torrent) haveAllMetadataPieces() bool {
	if t.metadataCompletedChunks == nil {
	for _, have := range t.metadataCompletedChunks {

// TODO: Propagate errors to disconnect peer.
func (t *Torrent) setMetadataSize(bytes int64) (err error) {
		// We already know the correct metadata size.
	if bytes <= 0 || bytes > 10000000 { // 10MB, pulled from my ass.
		return errors.New("bad size")
	if t.metadataBytes != nil && len(t.metadataBytes) == int(bytes) {
	t.metadataBytes = make([]byte, bytes)
	t.metadataCompletedChunks = make([]bool, (bytes+(1<<14)-1)/(1<<14))
	for c := range t.conns {
		c.requestPendingMetadata()

// The current working name for the torrent. Either the name in the info
// dict, a display name given by e.g. the dn value in a magnet link, or "".
func (t *Torrent) name() string {

func (t *Torrent) pieceState(index int) (ret PieceState) {
	p := &t.pieces[index]
	ret.Priority = t.piecePriority(index)
	if t.pieceComplete(index) {
	if p.QueuedForHash || p.Hashing {
	if !ret.Complete && t.piecePartiallyDownloaded(index) {

func (t *Torrent) metadataPieceSize(piece int) int {
	return metadataPieceSize(len(t.metadataBytes), piece)

func (t *Torrent) newMetadataExtensionMessage(c *connection, msgType int, piece int, data []byte) pp.Message {
	d["total_size"] = len(t.metadataBytes)
	p, err := bencode.Marshal(d)
		ExtendedID:      c.PeerExtensionIDs["ut_metadata"],
		ExtendedPayload: append(p, data...),
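
// Per BEP 9, the ut_metadata payload is a bencoded dict, optionally followed
// by raw metadata bytes. A "data" (msg_type 1) message for piece 0 of a
// 45000-byte info dict would begin:
//
//	d8:msg_typei1e5:piecei0e10:total_sizei45000ee
//
// with up to 16 KiB of piece data appended directly after the dict.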

func (t *Torrent) pieceStateRuns() (ret []PieceStateRun) {
	rle := missinggo.NewRunLengthEncoder(func(el interface{}, count uint64) {
		ret = append(ret, PieceStateRun{
			PieceState: el.(PieceState),
	for index := range t.pieces {
		rle.Append(t.pieceState(index), 1)

// Produces a small string representing a PieceStateRun.
func pieceStateRunStatusChars(psr PieceStateRun) (ret string) {
	ret = fmt.Sprintf("%d", psr.Length)
	ret += func() string {
		switch psr.Priority {
		case PiecePriorityNext:
		case PiecePriorityNormal:
		case PiecePriorityReadahead:
		case PiecePriorityNow:

func (t *Torrent) writeStatus(w io.Writer) {
	fmt.Fprintf(w, "Infohash: %s\n", t.infoHash.HexString())
	fmt.Fprintf(w, "Metadata length: %d\n", t.metadataSize())
	fmt.Fprintf(w, "Metadata have: ")
	for _, h := range t.metadataCompletedChunks {
		fmt.Fprintf(w, "%c", func() rune {
	fmt.Fprintf(w, "Piece length: %s\n", func() string {
			return fmt.Sprint(t.usualPieceSize())
	fmt.Fprintf(w, "Num Pieces: %d\n", t.numPieces())
	fmt.Fprint(w, "Piece States:")
	for _, psr := range t.pieceStateRuns() {
		w.Write([]byte(pieceStateRunStatusChars(psr)))
	fmt.Fprintf(w, "Reader Pieces:")
	t.forReaderOffsetPieces(func(begin, end int) (again bool) {
		fmt.Fprintf(w, " %d:%d", begin, end)
	fmt.Fprintf(w, "Trackers:\n")
	tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
	fmt.Fprintf(tw, " URL\tNext announce\tLast announce\n")
	for _, ta := range slices.Sort(slices.FromMapElems(t.trackerAnnouncers), func(l, r *trackerScraper) bool {
	}).([]*trackerScraper) {
		fmt.Fprintf(tw, " %s\n", ta.statusLine())
	fmt.Fprintf(w, "DHT Announces: %d\n", t.numDHTAnnounces)
	fmt.Fprintf(w, "Pending peers: %d\n", len(t.peers))
	fmt.Fprintf(w, "Half open: %d\n", len(t.halfOpen))
	fmt.Fprintf(w, "Active peers: %d\n", len(t.conns))
	conns := t.connsAsSlice()
	slices.Sort(conns, worseConn)
	for i, c := range conns {
		fmt.Fprintf(w, "%2d. ", i+1)

func (t *Torrent) haveInfo() bool {

// Returns a run-time generated MetaInfo that includes the info bytes and
// announce-list as currently known to the client.
func (t *Torrent) newMetaInfo() metainfo.MetaInfo {
	return metainfo.MetaInfo{
		CreationDate: time.Now().Unix(),
		Comment:      "dynamic metainfo from client",
		CreatedBy:    "go.torrent",
		AnnounceList: t.metainfo.UpvertedAnnounceList(),
		InfoBytes:    t.metadataBytes,

func (t *Torrent) BytesMissing() int64 {
	defer t.mu().RUnlock()
	return t.bytesMissingLocked()

func (t *Torrent) bytesMissingLocked() int64 {

func (t *Torrent) bytesLeft() (left int64) {
	for i := 0; i < t.numPieces(); i++ {
		left += int64(t.pieces[i].bytesLeft())

// Bytes left to give in tracker announces.
func (t *Torrent) bytesLeftAnnounce() uint64 {
		return uint64(t.bytesLeft())
	return math.MaxUint64
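
// Presumably math.MaxUint64 signals "amount left unknown" to trackers while
// the info dict, and hence the total length, isn't available yet.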

func (t *Torrent) piecePartiallyDownloaded(piece int) bool {
	if t.pieceComplete(piece) {
	if t.pieceAllDirty(piece) {
	return t.pieces[piece].hasDirtyChunks()

func (t *Torrent) usualPieceSize() int {
	return int(t.info.PieceLength)

func (t *Torrent) numPieces() int {
	return t.info.NumPieces()

func (t *Torrent) numPiecesCompleted() (num int) {
	return t.completedPieces.Len()

func (t *Torrent) close() (err error) {
	if t.storage != nil {
	for conn := range t.conns {
	t.cl.event.Broadcast()
	t.pieceStateChanges.Close()
	t.updateWantPeersEvent()

func (t *Torrent) requestOffset(r request) int64 {
	return torrentRequestOffset(t.length, int64(t.usualPieceSize()), r)

// Return the request that would include the given offset into the torrent
// data. Returns !ok if there is no such request.
func (t *Torrent) offsetRequest(off int64) (req request, ok bool) {
	return torrentOffsetRequest(t.length, t.info.PieceLength, int64(t.chunkSize), off)

func (t *Torrent) writeChunk(piece int, begin int64, data []byte) (err error) {
	tr := perf.NewTimer()
	n, err := t.pieces[piece].Storage().WriteAt(data, begin)
	if err == nil && n != len(data) {
		err = io.ErrShortWrite
		tr.Mark("write chunk")

func (t *Torrent) bitfield() (bf []bool) {
	bf = make([]bool, t.numPieces())
	t.completedPieces.IterTyped(func(piece int) (again bool) {

func (t *Torrent) pieceNumChunks(piece int) int {
	return int((t.pieceLength(piece) + t.chunkSize - 1) / t.chunkSize)
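
// The same ceiling division as for metadata pieces: a 262144-byte (256 KiB)
// piece with 16384-byte chunks has (262144 + 16383) / 16384 == 16 chunks,
// while a short final piece of 24 bytes has 1.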

func (t *Torrent) pendAllChunkSpecs(pieceIndex int) {
	t.pieces[pieceIndex].DirtyChunks.Clear()

type Peer struct {
	// Peer is known to support encryption.
	SupportsEncryption bool

func (t *Torrent) pieceLength(piece int) (len_ pp.Integer) {
	if piece < 0 || piece >= t.info.NumPieces() {
	if piece == t.numPieces()-1 {
		len_ = pp.Integer(t.length % t.info.PieceLength)
		len_ = pp.Integer(t.info.PieceLength)
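
// For example, a 1048600-byte torrent with 262144-byte pieces has 5 pieces,
// and the last one covers only 1048600 - 4*262144 = 24 bytes. When the total
// length is an exact multiple of the piece length, the modulo above yields 0
// and the full piece length is used instead.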

func (t *Torrent) hashPiece(piece int) (ret metainfo.Hash) {
	hash := pieceHash.New()
	p := &t.pieces[piece]
	p.waitNoPendingWrites()
	ip := t.info.Piece(piece)
	n, err := io.Copy(hash, io.NewSectionReader(t.pieces[piece].Storage(), 0, pl))
		missinggo.CopyExact(&ret, hash.Sum(nil))
	if err != io.ErrUnexpectedEOF && !os.IsNotExist(err) {
		log.Printf("unexpected error hashing piece with %T: %s", t.storage.TorrentImpl, err)

func (t *Torrent) haveAnyPieces() bool {
	for i := range t.pieces {
		if t.pieceComplete(i) {

func (t *Torrent) havePiece(index int) bool {
	return t.haveInfo() && t.pieceComplete(index)

func (t *Torrent) haveChunk(r request) (ret bool) {
	// log.Println("have chunk", r, ret)
	if t.pieceComplete(int(r.Index)) {
	p := &t.pieces[r.Index]
	return !p.pendingChunk(r.chunkSpec, t.chunkSize)

func chunkIndex(cs chunkSpec, chunkSize pp.Integer) int {
	return int(cs.Begin / chunkSize)

func (t *Torrent) wantPiece(r request) bool {
	if !t.wantPieceIndex(int(r.Index)) {
	if t.pieces[r.Index].pendingChunk(r.chunkSpec, t.chunkSize) {
	// TODO: What about pieces that were wanted, but aren't now, and aren't
	// completed either? That used to be done here.

func (t *Torrent) wantPieceIndex(index int) bool {
	if index < 0 || index >= t.numPieces() {
	p := &t.pieces[index]
	if t.pieceComplete(index) {
	if t.pendingPieces.Contains(index) {
	return !t.forReaderOffsetPieces(func(begin, end int) bool {
		return index < begin || index >= end

// The worst connection is one that hasn't sent, or been sent, anything
// useful for the longest time. A bad connection is one that usually sends us
// unwanted pieces, or has been in the worse half of the established
// connections for more than a minute.
func (t *Torrent) worstBadConn() *connection {
	wcs := worseConnSlice{t.worstUnclosedConns()}
		c := heap.Pop(&wcs).(*connection)
		if c.UnwantedChunksReceived >= 6 && c.UnwantedChunksReceived > c.UsefulChunksReceived {
		if wcs.Len() >= (t.maxEstablishedConns+1)/2 {
			// Give connections 1 minute to prove themselves.
			if time.Since(c.completedHandshake) > time.Minute {

type PieceStateChange struct {

func (t *Torrent) publishPieceChange(piece int) {
	cur := t.pieceState(piece)
	p := &t.pieces[piece]
	if cur != p.PublicPieceState {
		p.PublicPieceState = cur
		t.pieceStateChanges.Publish(PieceStateChange{

func (t *Torrent) pieceNumPendingChunks(piece int) int {
	if t.pieceComplete(piece) {
	return t.pieceNumChunks(piece) - t.pieces[piece].numDirtyChunks()

func (t *Torrent) pieceAllDirty(piece int) bool {
	return t.pieces[piece].DirtyChunks.Len() == t.pieceNumChunks(piece)

func (t *Torrent) readersChanged() {
	t.updateReaderPieces()
	t.updateAllPiecePriorities()

func (t *Torrent) updateReaderPieces() {
	t.readerNowPieces, t.readerReadaheadPieces = t.readerPiecePriorities()

func (t *Torrent) readerPosChanged(from, to pieceRange) {
	t.updateReaderPieces()
	// Order the ranges, high and low.
	if l.begin > h.begin {
		// Two distinct ranges.
		t.updatePiecePriorities(l.begin, l.end)
		t.updatePiecePriorities(h.begin, h.end)
		t.updatePiecePriorities(l.begin, end)

func (t *Torrent) maybeNewConns() {
	// Tickle the accept routine.
	t.cl.event.Broadcast()

func (t *Torrent) piecePriorityChanged(piece int) {
	for c := range t.conns {
		c.updatePiecePriority(piece)
	t.publishPieceChange(piece)

func (t *Torrent) updatePiecePriority(piece int) {
	p := &t.pieces[piece]
	newPrio := t.piecePriorityUncached(piece)
	if newPrio == p.priority {
	t.piecePriorityChanged(piece)

func (t *Torrent) updateAllPiecePriorities() {
	t.updatePiecePriorities(0, len(t.pieces))

// Update all piece priorities in one hit. This function should have the same
// output as updatePiecePriority, but across all pieces.
func (t *Torrent) updatePiecePriorities(begin, end int) {
	for i := begin; i < end; i++ {
		t.updatePiecePriority(i)

// Returns the range of pieces [begin, end) that contains the extent of bytes.
func (t *Torrent) byteRegionPieces(off, size int64) (begin, end int) {
	begin = int(off / t.info.PieceLength)
	end = int((off + size + t.info.PieceLength - 1) / t.info.PieceLength)
	if end > t.info.NumPieces() {
		end = t.info.NumPieces()
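
// Worked example: with 262144-byte pieces, off=600000 and size=100000 give
// begin = 600000/262144 = 2 and end = ceil(700000/262144) = 3, i.e. the
// half-open piece range [2, 3).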

// Returns true if all iterations complete without breaking. Returns the read
// regions for all readers. The reader regions should not be merged as some
// callers depend on this method to enumerate readers.
func (t *Torrent) forReaderOffsetPieces(f func(begin, end int) (more bool)) (all bool) {
	for r := range t.readers {
		if p.begin >= p.end {
		if !f(p.begin, p.end) {

func (t *Torrent) piecePriority(piece int) piecePriority {
		return PiecePriorityNone
	return t.pieces[piece].priority

func (t *Torrent) piecePriorityUncached(piece int) piecePriority {
	if t.pieceComplete(piece) {
		return PiecePriorityNone
	if t.readerNowPieces.Contains(piece) {
		return PiecePriorityNow
	// if t.readerNowPieces.Contains(piece - 1) {
	// 	return PiecePriorityNext
	if t.readerReadaheadPieces.Contains(piece) {
		return PiecePriorityReadahead
	if t.pendingPieces.Contains(piece) {
		return PiecePriorityNormal
	return PiecePriorityNone

func (t *Torrent) pendPiece(piece int) {
	if t.pendingPieces.Contains(piece) {
	if t.havePiece(piece) {
	t.pendingPieces.Add(piece)
	t.updatePiecePriority(piece)

func (t *Torrent) unpendPieces(unpend *bitmap.Bitmap) {
	t.pendingPieces.Sub(unpend)
	unpend.IterTyped(func(piece int) (again bool) {
		t.updatePiecePriority(piece)

func (t *Torrent) pendPieceRange(begin, end int) {
	for i := begin; i < end; i++ {

func (t *Torrent) unpendPieceRange(begin, end int) {
	bm.AddRange(begin, end)

func (t *Torrent) pendRequest(req request) {
	ci := chunkIndex(req.chunkSpec, t.chunkSize)
	t.pieces[req.Index].pendChunkIndex(ci)

func (t *Torrent) pieceCompletionChanged(piece int) {
	t.cl.event.Broadcast()
	if t.pieceComplete(piece) {
		t.onPieceCompleted(piece)
		t.onIncompletePiece(piece)
	t.updatePiecePriority(piece)

func (t *Torrent) openNewConns() {

func (t *Torrent) getConnPieceInclination() []int {
	_ret := t.connPieceInclinationPool.Get()
		pieceInclinationsNew.Add(1)
		return rand.Perm(t.numPieces())
	pieceInclinationsReused.Add(1)

func (t *Torrent) putPieceInclination(pi []int) {
	t.connPieceInclinationPool.Put(pi)
	pieceInclinationsPut.Add(1)

func (t *Torrent) updatePieceCompletion(piece int) {
	pcu := t.pieceCompleteUncached(piece)
	changed := t.completedPieces.Get(piece) != pcu
	t.completedPieces.Set(piece, pcu)
		t.pieceCompletionChanged(piece)

// Non-blocking read. Client lock is not required.
func (t *Torrent) readAt(b []byte, off int64) (n int, err error) {
	p := &t.pieces[off/t.info.PieceLength]
	p.waitNoPendingWrites()
	return p.Storage().ReadAt(b, off-p.Info().Offset())
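
// Worked example: with 262144-byte pieces, off=300000 falls in piece 1
// (300000/262144 == 1), whose Offset() is 262144, so the underlying storage
// read happens at intra-piece offset 300000 - 262144 = 37856.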

func (t *Torrent) updateAllPieceCompletions() {
	for i := range iter.N(t.numPieces()) {
		t.updatePieceCompletion(i)

// Returns an error if the metadata was completed, but couldn't be set for
// some reason. Blame it on the last peer to contribute.
func (t *Torrent) maybeCompleteMetadata() error {
	if !t.haveAllMetadataPieces() {
		// Don't have enough metadata pieces.
	err := t.setInfoBytes(t.metadataBytes)
		t.invalidateMetadata()
		return fmt.Errorf("error setting info bytes: %s", err)
	if t.cl.config.Debug {
		log.Printf("%s: got metadata from peers", t)

func (t *Torrent) readerPieces() (ret bitmap.Bitmap) {
	t.forReaderOffsetPieces(func(begin, end int) bool {
		ret.AddRange(begin, end)

func (t *Torrent) readerPiecePriorities() (now, readahead bitmap.Bitmap) {
	t.forReaderOffsetPieces(func(begin, end int) bool {
			readahead.AddRange(begin+1, end)

func (t *Torrent) needData() bool {
	if t.closed.IsSet() {
	if t.pendingPieces.Len() != 0 {
	// Read as "not all complete".
	return !t.readerPieces().IterTyped(func(piece int) bool {
		return t.pieceComplete(piece)

func appendMissingStrings(old, new []string) (ret []string) {
	for _, n := range new {
		for _, o := range old {
		ret = append(ret, n)

func appendMissingTrackerTiers(existing [][]string, minNumTiers int) (ret [][]string) {
	for minNumTiers > len(ret) {
		ret = append(ret, nil)

func (t *Torrent) addTrackers(announceList [][]string) {
	fullAnnounceList := &t.metainfo.AnnounceList
	t.metainfo.AnnounceList = appendMissingTrackerTiers(*fullAnnounceList, len(announceList))
	for tierIndex, trackerURLs := range announceList {
		(*fullAnnounceList)[tierIndex] = appendMissingStrings((*fullAnnounceList)[tierIndex], trackerURLs)
	t.startMissingTrackerScrapers()
	t.updateWantPeersEvent()
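
// Merge behaviour sketch (hypothetical values): starting from an existing
// announce list [["udp://a"]], addTrackers([["udp://a", "udp://b"],
// ["udp://c"]]) first pads the list to two tiers, then appends only the
// missing URLs per tier, giving [["udp://a", "udp://b"], ["udp://c"]];
// duplicates within a tier are never added twice.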

// Don't call this before the info is available.
func (t *Torrent) bytesCompleted() int64 {
	return t.info.TotalLength() - t.bytesLeft()

func (t *Torrent) SetInfoBytes(b []byte) (err error) {
	defer t.cl.mu.Unlock()
	return t.setInfoBytes(b)

// Returns true if the connection is removed from t.conns.
func (t *Torrent) deleteConnection(c *connection) (ret bool) {

func (t *Torrent) dropConnection(c *connection) {
	t.cl.event.Broadcast()
	if t.deleteConnection(c) {

func (t *Torrent) wantPeers() bool {
	if t.closed.IsSet() {
	if len(t.peers) > torrentPeersLowWater {
	return t.needData() || t.seeding()

func (t *Torrent) updateWantPeersEvent() {
		t.wantPeersEvent.Set()
		t.wantPeersEvent.Clear()

// Returns whether the client should make an effort to seed the torrent.
func (t *Torrent) seeding() bool {
	if t.closed.IsSet() {
	if cl.config.NoUpload {
	if !cl.config.Seed {

func (t *Torrent) startScrapingTracker(url string) {
	if _, ok := t.trackerAnnouncers[url]; ok {
	newAnnouncer := &trackerScraper{
	if t.trackerAnnouncers == nil {
		t.trackerAnnouncers = make(map[string]*trackerScraper)
	t.trackerAnnouncers[url] = newAnnouncer
	go newAnnouncer.Run()

// Adds and starts tracker scrapers for tracker URLs that aren't already
// being scraped.
func (t *Torrent) startMissingTrackerScrapers() {
	if t.cl.config.DisableTrackers {
	t.startScrapingTracker(t.metainfo.Announce)
	for _, tier := range t.metainfo.AnnounceList {
		for _, url := range tier {
			t.startScrapingTracker(url)

// Returns an AnnounceRequest with fields filled out to defaults and current
// values.
func (t *Torrent) announceRequest() tracker.AnnounceRequest {
	return tracker.AnnounceRequest{
		Event:    tracker.None,
		Port:     uint16(t.cl.incomingPeerPort()),
		PeerId:   t.cl.peerID,
		InfoHash: t.infoHash,
		Left:     t.bytesLeftAnnounce(),

// Adds peers revealed in an announce until the announce ends, or we have
// enough peers.
func (t *Torrent) consumeDHTAnnounce(pvs <-chan dht.PeersValues) {
	// Count all the unique addresses we got during this announce.
	allAddrs := make(map[string]struct{})
		case v, ok := <-pvs:
			addPeers := make([]Peer, 0, len(v.Peers))
			for _, cp := range v.Peers {
					// Can't do anything with this.
				addPeers = append(addPeers, Peer{
					Source: peerSourceDHTGetPeers,
				key := (&net.UDPAddr{
				allAddrs[key] = struct{}{}
			t.addPeers(addPeers)
			numPeers := len(t.peers)
			if numPeers >= torrentPeersHighWater {
		case <-t.closed.LockedChan(&cl.mu):

func (t *Torrent) announceDHT(impliedPort bool) (err error) {
	ps, err := cl.dHT.Announce(t.infoHash, cl.incomingPeerPort(), impliedPort)
	t.consumeDHTAnnounce(ps.Peers)

func (t *Torrent) dhtAnnouncer() {
		case <-t.wantPeersEvent.LockedChan(&cl.mu):
		case <-t.closed.LockedChan(&cl.mu):
		err := t.announceDHT(true)
			defer cl.mu.Unlock()
				log.Printf("error announcing %q to DHT: %s", t, err)
		case <-t.closed.LockedChan(&cl.mu):
		case <-time.After(5 * time.Minute):

func (t *Torrent) addPeers(peers []Peer) {
	for _, p := range peers {
		if t.cl.badPeerIPPort(p.IP, p.Port) {

func (t *Torrent) Stats() TorrentStats {
	defer t.cl.mu.Unlock()
	t.stats.ActivePeers = len(t.conns)
	t.stats.HalfOpenPeers = len(t.halfOpen)
	t.stats.PendingPeers = len(t.peers)
	t.stats.TotalPeers = t.numTotalPeers()

// The total number of peers in the torrent.
func (t *Torrent) numTotalPeers() int {
	peers := make(map[string]struct{})
	for conn := range t.conns {
		peers[conn.conn.RemoteAddr().String()] = struct{}{}
	for addr := range t.halfOpen {
		peers[addr] = struct{}{}
	for _, peer := range t.peers {
		peers[fmt.Sprintf("%s:%d", peer.IP, peer.Port)] = struct{}{}

// Returns true if the connection is added.
func (t *Torrent) addConnection(c *connection, outgoing bool) bool {
	if t.cl.closed.IsSet() {
	for c0 := range t.conns {
		if c.PeerID == c0.PeerID {
			// Already connected to a client with that ID.
			duplicateClientConns.Add(1)
			lower := string(t.cl.peerID[:]) < string(c.PeerID[:])
			// Retain the connection initiated by the peer with the lower
			// peer ID.
			if outgoing == lower {
				// Close the other one.
				// Is it safe to delete from the map while we're iterating
				// over it?
				t.deleteConnection(c0)
				// Abandon this one.
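
// A note on the tie-break above (reasoning sketched here, not spelled out in
// the original): both sides of a duplicate pair must independently agree on
// which connection survives. Both keep the connection dialed by the lower-ID
// peer: on the lower-ID side that's its outgoing connection
// (outgoing == lower == true), and on the higher-ID side its incoming one
// (outgoing == lower == false).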
	if len(t.conns) >= t.maxEstablishedConns {
		c := t.worstBadConn()
		if t.cl.config.Debug && missinggo.CryHeard() {
			log.Printf("%s: dropping connection to make room for new one:\n %s", t, c)
		t.deleteConnection(c)
	if len(t.conns) >= t.maxEstablishedConns {
		panic("connection already associated with a torrent")
	// Reconcile bytes transferred before the connection was associated with
	// the torrent.
	t.stats.wroteBytes(c.stats.BytesWritten)
	t.stats.readBytes(c.stats.BytesRead)
	t.conns[c] = struct{}{}

func (t *Torrent) wantConns() bool {
	if !t.networkingEnabled {
	if t.closed.IsSet() {
	if !t.seeding() && !t.needData() {
	if len(t.conns) < t.maxEstablishedConns {
	return t.worstBadConn() != nil

func (t *Torrent) SetMaxEstablishedConns(max int) (oldMax int) {
	defer t.cl.mu.Unlock()
	oldMax = t.maxEstablishedConns
	t.maxEstablishedConns = max
	wcs := slices.HeapInterface(slices.FromMapKeys(t.conns), worseConn)
	for len(t.conns) > t.maxEstablishedConns && wcs.Len() > 0 {
		t.dropConnection(wcs.Pop().(*connection))

func (t *Torrent) mu() missinggo.RWLocker {

func (t *Torrent) pieceHashed(piece int, correct bool) {
	if t.closed.IsSet() {
	p := &t.pieces[piece]
	touchers := t.reapPieceTouchers(piece)
		// Don't score the first time a piece is hashed, it could be an
		// initial check.
			pieceHashedCorrect.Add(1)
			log.Printf("%s: piece %d (%s) failed hash: %d connections contributed", t, piece, p.Hash, len(touchers))
			pieceHashedNotCorrect.Add(1)
		for _, c := range touchers {
			c.goodPiecesDirtied++
		err := p.Storage().MarkComplete()
			log.Printf("%T: error completing piece %d: %s", t.storage, piece, err)
		t.updatePieceCompletion(piece)
		if len(touchers) != 0 {
			for _, c := range touchers {
				// Y u do dis peer?!
				c.badPiecesDirtied++
			slices.Sort(touchers, connLessTrusted)
			log.Printf("dropping first corresponding conn from trust: %v", func() (ret []int) {
				for _, c := range touchers {
					ret = append(ret, c.netGoodPiecesDirtied())
			t.cl.banPeerIP(missinggo.AddrIP(c.remoteAddr()))
		t.onIncompletePiece(piece)

func (t *Torrent) onPieceCompleted(piece int) {
	t.pendingPieces.Remove(piece)
	t.pendAllChunkSpecs(piece)
	for conn := range t.conns {
		for r := range conn.Requests {
			if int(r.Index) == piece {
		// Could check here if the peer doesn't have the piece, but due to
		// caching some peers may have said they have a piece when they don't.

func (t *Torrent) onIncompletePiece(piece int) {
	if t.pieceAllDirty(piece) {
		t.pendAllChunkSpecs(piece)
	if !t.wantPieceIndex(piece) {
	// We could drop any connections that we've told we have a piece that we
	// don't actually have. But there's a test failure, and it seems clients
	// don't care if you request pieces you already claim to have. Pruning
	// bad connections might just remove any connections that aren't treating
	// us favourably anyway.

	// for c := range t.conns {
	// 	if c.sentHave(piece) {
	for conn := range t.conns {
		if conn.PeerHasPiece(piece) {
			conn.updateRequests()

func (t *Torrent) verifyPiece(piece int) {
	defer cl.mu.Unlock()
	p := &t.pieces[piece]
	for p.Hashing || t.storage == nil {
	p.QueuedForHash = false
	if t.closed.IsSet() || t.pieceComplete(piece) {
		t.updatePiecePriority(piece)
	t.publishPieceChange(piece)
	sum := t.hashPiece(piece)
	t.pieceHashed(piece, sum == p.Hash)

// Return the connections that touched a piece, and clear the entries while
// doing so.
func (t *Torrent) reapPieceTouchers(piece int) (ret []*connection) {
	for c := range t.conns {
		if _, ok := c.peerTouchedPieces[piece]; ok {
			ret = append(ret, c)
			delete(c.peerTouchedPieces, piece)

func (t *Torrent) connsAsSlice() (ret []*connection) {
	for c := range t.conns {
		ret = append(ret, c)

// Currently doesn't really queue, but should in the future.
func (t *Torrent) queuePieceCheck(pieceIndex int) {
	piece := &t.pieces[pieceIndex]
	if piece.QueuedForHash {
	piece.QueuedForHash = true
	t.publishPieceChange(pieceIndex)
	go t.verifyPiece(pieceIndex)