19 "github.com/anacrolix/missinggo/prioritybitmap"
21 "github.com/anacrolix/dht"
22 "github.com/anacrolix/missinggo"
23 "github.com/anacrolix/missinggo/bitmap"
24 "github.com/anacrolix/missinggo/perf"
25 "github.com/anacrolix/missinggo/pubsub"
26 "github.com/anacrolix/missinggo/slices"
27 "github.com/bradfitz/iter"
29 "github.com/anacrolix/torrent/bencode"
30 "github.com/anacrolix/torrent/metainfo"
31 pp "github.com/anacrolix/torrent/peer_protocol"
32 "github.com/anacrolix/torrent/storage"
33 "github.com/anacrolix/torrent/tracker"
func (t *Torrent) chunkIndexSpec(chunkIndex, piece int) chunkSpec {
	return chunkIndexSpec(chunkIndex, t.pieceLength(piece), t.chunkSize)
}
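// Illustrative only (not from the original source): with the conventional
// 16 KiB chunk size, chunk i of a piece covers bytes
// [i*chunkSize, i*chunkSize+Length), with the final chunk truncated at the
// piece boundary. A sketch, assuming the package's chunkIndexSpec helper:
//
//	chunkSize := pp.Integer(1 << 14)    // 16 KiB
//	pieceLength := pp.Integer(40 << 10) // a 40 KiB piece
//	cs := chunkIndexSpec(2, pieceLength, chunkSize)
//	// cs.Begin == 32768, cs.Length == 8192: the last, short chunk.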
type peersKey struct {
	IPBytes string
	Port    int
}

// Maintains the state of a torrent within a Client.
type Torrent struct {
	cl *Client

	networkingEnabled bool

	closed   missinggo.Event
	infoHash metainfo.Hash
	pieces   []Piece
	// Values are the piece indices that changed.
	pieceStateChanges *pubsub.PubSub
	// The size of chunks to request from peers over the wire. This is
	// normally 16KiB by convention these days.
	chunkSize pp.Integer
	chunkPool *sync.Pool
	// Total length of the torrent in bytes. Stored because it's not O(1) to
	// get this from the info dict.
	length *int64

	// The storage to open when the info dict becomes available.
	storageOpener *storage.Client
	// Storage for torrent data.
	storage *storage.Torrent
	// Read-locked for using storage, and write-locked for Closing.
	storageLock sync.RWMutex

	metainfo metainfo.MetaInfo

	// The info dict. nil if we don't have it (yet).
	info *metainfo.Info
	// Active peer connections, running message stream loops.
	conns               map[*connection]struct{}
	maxEstablishedConns int
	// Set of addrs to which we're attempting to connect. Connections are
	// half-open until all handshakes are completed.
	halfOpen    map[string]Peer
	fastestConn *connection

	// Reserve of peers to connect to. A peer can be both here and in the
	// active connections if we're told about the peer after connecting with
	// them. That encourages us to reconnect to peers that are well known in
	// the swarm.
	peers          map[peersKey]Peer
	wantPeersEvent missinggo.Event
	// An announcer for each tracker URL.
	trackerAnnouncers map[string]*trackerScraper
	// How many times we've initiated a DHT announce. TODO: Move into stats.
	numDHTAnnounces int
	// Name used if the info name isn't available. Should be cleared when the
	// Info does become available.
	displayName string
	// The bencoded bytes of the info dict. This is actively manipulated if
	// the info bytes aren't initially available, and we try to fetch them
	// from peers.
	metadataBytes []byte
	// Each element corresponds to the 16KiB metadata pieces. If true, we have
	// received that piece.
	metadataCompletedChunks []bool
	metadataChanged         sync.Cond

	// Set when .Info is obtained.
	gotMetainfo missinggo.Event

	readers               map[*reader]struct{}
	readerNowPieces       bitmap.Bitmap
	readerReadaheadPieces bitmap.Bitmap

	// The indices of pieces we want with normal priority that aren't
	// currently available.
	pendingPieces prioritybitmap.PriorityBitmap
	// A cache of completed piece indices.
	completedPieces bitmap.Bitmap
	// Pieces that need to be hashed.
	piecesQueuedForHash bitmap.Bitmap

	// A pool of piece priorities []int for assignment to new connections.
	// These "inclinations" are used to give connections preference for
	// different pieces.
	connPieceInclinationPool sync.Pool
	// Torrent-level statistics.
	stats TorrentStats
}
// Returns a channel that is closed when the Torrent is closed.
func (t *Torrent) Closed() <-chan struct{} {
	return t.closed.LockedChan(&t.cl.mu)
}
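// Hypothetical usage sketch (caller code, not part of this file): since
// Closed returns a channel, shutdown can be selected on alongside other
// events.
//
//	select {
//	case <-t.Closed():
//		// Torrent was closed; stop issuing reads.
//	case <-time.After(time.Minute):
//		// Gave up waiting.
//	}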
// KnownSwarm returns the known subset of the peers in the Torrent's swarm, including active,
// pending, and half-open peers.
func (t *Torrent) KnownSwarm() (ks []Peer) {
	// Add pending peers to the list
	for _, peer := range t.peers {
		ks = append(ks, peer)
	}

	// Add half-open peers to the list
	for _, peer := range t.halfOpen {
		ks = append(ks, peer)
	}

	// Add active peers to the list
	for conn := range t.conns {
		host, portString, err := net.SplitHostPort(conn.remoteAddr().String())
		if err != nil {
			continue
		}
		ip := net.ParseIP(host)
		port, err := strconv.Atoi(portString)
		if err != nil {
			continue
		}
		ks = append(ks, Peer{
			Id:     conn.PeerID,
			IP:     ip,
			Port:   port,
			Source: conn.Discovery,
			// > If the connection is encrypted, that's certainly enough to set SupportsEncryption.
			// > But if we're not connected to them with an encrypted connection, I couldn't say
			// > what's appropriate. We can carry forward the SupportsEncryption value as we
			// > received it from trackers/DHT/PEX, or just use the encryption state for the
			// > connection. It's probably easiest to do the latter for now.
			// https://github.com/anacrolix/torrent/pull/188
			SupportsEncryption: conn.headerEncrypted,
		})
	}

	return
}
func (t *Torrent) setChunkSize(size pp.Integer) {
	t.chunkSize = size
	t.chunkPool = &sync.Pool{
		New: func() interface{} {
			b := make([]byte, size)
			// Pool a pointer to avoid an allocation converting to interface{}.
			return &b
		},
	}
}
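// Sketch of how the pool is typically used (hypothetical call sites, not
// from this file): a buffer is fetched to receive an incoming chunk and
// returned once the data has been handed to storage, amortizing allocations
// across connections.
//
//	b := t.chunkPool.Get().(*[]byte)
//	// ... fill (*b)[:n] with the wire payload and write it out ...
//	t.chunkPool.Put(b)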
func (t *Torrent) setDisplayName(dn string) {
	if t.haveInfo() {
		return
	}
	t.displayName = dn
}

func (t *Torrent) pieceComplete(piece int) bool {
	return t.completedPieces.Get(piece)
}

func (t *Torrent) pieceCompleteUncached(piece int) storage.Completion {
	return t.pieces[piece].Storage().Completion()
}

// There's a connection to that address already.
func (t *Torrent) addrActive(addr string) bool {
	if _, ok := t.halfOpen[addr]; ok {
		return true
	}
	for c := range t.conns {
		ra := c.remoteAddr()
		if ra.String() == addr {
			return true
		}
	}
	return false
}

func (t *Torrent) unclosedConnsAsSlice() (ret []*connection) {
	ret = make([]*connection, 0, len(t.conns))
	for c := range t.conns {
		if !c.closed.IsSet() {
			ret = append(ret, c)
		}
	}
	return
}

func (t *Torrent) addPeer(p Peer) {
	cl := t.cl
	if len(t.peers) >= cl.config.TorrentPeersHighWater {
		return
	}
	key := peersKey{string(p.IP), p.Port}
	if _, ok := t.peers[key]; ok {
		return
	}
	t.peers[key] = p
	peersAddedBySource.Add(string(p.Source), 1)
}

func (t *Torrent) invalidateMetadata() {
	for i := range t.metadataCompletedChunks {
		t.metadataCompletedChunks[i] = false
	}
	t.info = nil
}

func (t *Torrent) saveMetadataPiece(index int, data []byte) {
	if t.haveInfo() {
		return
	}
	if index >= len(t.metadataCompletedChunks) {
		log.Printf("%s: ignoring metadata piece %d", t, index)
		return
	}
	copy(t.metadataBytes[(1<<14)*index:], data)
	t.metadataCompletedChunks[index] = true
}
func (t *Torrent) metadataPieceCount() int {
	return (len(t.metadataBytes) + (1 << 14) - 1) / (1 << 14)
}
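// The expression above is integer ceiling division. For example, a
// 48000-byte info dict spans three 16384-byte (16 KiB) metadata pieces:
//
//	48000 / 16384 == 2                // floor division loses the 15232-byte tail
//	(48000 + 16384 - 1) / 16384 == 3  // ceiling division counts it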
func (t *Torrent) haveMetadataPiece(piece int) bool {
	if t.haveInfo() {
		return (1<<14)*piece < len(t.metadataBytes)
	}
	return piece < len(t.metadataCompletedChunks) && t.metadataCompletedChunks[piece]
}

func (t *Torrent) metadataSizeKnown() bool {
	return t.metadataBytes != nil
}

func (t *Torrent) metadataSize() int {
	return len(t.metadataBytes)
}
func infoPieceHashes(info *metainfo.Info) (ret []string) {
	for i := 0; i < len(info.Pieces); i += sha1.Size {
		ret = append(ret, string(info.Pieces[i:i+sha1.Size]))
	}
	return
}
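// The info dict's "pieces" value is a flat string of 20-byte SHA-1 digests,
// so its length is a multiple of sha1.Size (20) and piece i's hash is the
// sub-slice [i*20, (i+1)*20). For example:
//
//	numPieces := len(info.Pieces) / sha1.Size // 60 bytes -> 3 pieces
//	hash0 := info.Pieces[0:sha1.Size]         // raw digest of piece 0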
func (t *Torrent) makePieces() {
	hashes := infoPieceHashes(t.info)
	t.pieces = make([]Piece, len(hashes))
	for i, hash := range hashes {
		piece := &t.pieces[i]
		piece.t = t
		piece.index = i
		piece.noPendingWrites.L = &piece.pendingWritesMutex
		missinggo.CopyExact(piece.hash[:], hash)
	}
}

// Called when metadata for a torrent becomes available.
func (t *Torrent) setInfoBytes(b []byte) error {
	if t.haveInfo() {
		return nil
	}
	if metainfo.HashBytes(b) != t.infoHash {
		return errors.New("info bytes have wrong hash")
	}
	var info metainfo.Info
	err := bencode.Unmarshal(b, &info)
	if err != nil {
		return fmt.Errorf("error unmarshalling info bytes: %s", err)
	}
	err = validateInfo(&info)
	if err != nil {
		return fmt.Errorf("bad info: %s", err)
	}
	defer t.updateWantPeersEvent()
	t.info = &info
	t.displayName = "" // Save a few bytes lol.
	t.cl.event.Broadcast()
	t.gotMetainfo.Set()
	t.storage, err = t.storageOpener.OpenTorrent(t.info, t.infoHash)
	if err != nil {
		return fmt.Errorf("error opening torrent storage: %s", err)
	}
	t.length = new(int64)
	for _, f := range t.info.UpvertedFiles() {
		*t.length += f.Length
	}
	t.metadataBytes = b
	t.metadataCompletedChunks = nil
	t.makePieces()
	for conn := range t.conns {
		if err := conn.setNumPieces(t.numPieces()); err != nil {
			log.Printf("closing connection: %s", err)
			conn.Close()
		}
	}
	for i := range t.pieces {
		t.updatePieceCompletion(i)
		p := &t.pieces[i]
		if !p.storageCompletionOk {
			// log.Printf("piece %s completion unknown, queueing check", p)
			t.queuePieceCheck(i)
		}
	}
	return nil
}
func (t *Torrent) haveAllMetadataPieces() bool {
	if t.haveInfo() {
		return true
	}
	if t.metadataCompletedChunks == nil {
		return false
	}
	for _, have := range t.metadataCompletedChunks {
		if !have {
			return false
		}
	}
	return true
}

// TODO: Propagate errors to disconnect peer.
func (t *Torrent) setMetadataSize(bytes int64) (err error) {
	if t.haveInfo() {
		// We already know the correct metadata size.
		return
	}
	if bytes <= 0 || bytes > 10000000 { // 10MB, pulled from my ass.
		return errors.New("bad size")
	}
	if t.metadataBytes != nil && len(t.metadataBytes) == int(bytes) {
		return
	}
	t.metadataBytes = make([]byte, bytes)
	t.metadataCompletedChunks = make([]bool, (bytes+(1<<14)-1)/(1<<14))
	t.metadataChanged.Broadcast()
	for c := range t.conns {
		c.requestPendingMetadata()
	}
	return
}
// The current working name for the torrent. Either the name in the info
// dict, or a display name given, such as the dn value in a magnet link, or
// "".
func (t *Torrent) name() string {
	if t.haveInfo() {
		return t.info.Name
	}
	return t.displayName
}

func (t *Torrent) pieceState(index int) (ret PieceState) {
	p := &t.pieces[index]
	ret.Priority = t.piecePriority(index)
	if t.pieceComplete(index) {
		ret.Complete = true
	}
	if p.queuedForHash() || p.hashing {
		ret.Checking = true
	}
	if !ret.Complete && t.piecePartiallyDownloaded(index) {
		ret.Partial = true
	}
	return
}

func (t *Torrent) metadataPieceSize(piece int) int {
	return metadataPieceSize(len(t.metadataBytes), piece)
}

func (t *Torrent) newMetadataExtensionMessage(c *connection, msgType int, piece int, data []byte) pp.Message {
	d := map[string]int{
		"msg_type": msgType,
		"piece":    piece,
	}
	if data != nil {
		d["total_size"] = len(t.metadataBytes)
	}
	p, err := bencode.Marshal(d)
	if err != nil {
		panic(err)
	}
	return pp.Message{
		Type:            pp.Extended,
		ExtendedID:      c.PeerExtensionIDs["ut_metadata"],
		ExtendedPayload: append(p, data...),
	}
}

func (t *Torrent) pieceStateRuns() (ret []PieceStateRun) {
	rle := missinggo.NewRunLengthEncoder(func(el interface{}, count uint64) {
		ret = append(ret, PieceStateRun{
			PieceState: el.(PieceState),
			Length:     int(count),
		})
	})
	for index := range t.pieces {
		rle.Append(t.pieceState(index), 1)
	}
	rle.Flush()
	return
}

// Produces a small string representing a PieceStateRun.
func pieceStateRunStatusChars(psr PieceStateRun) (ret string) {
	ret = fmt.Sprintf("%d", psr.Length)
	ret += func() string {
		switch psr.Priority {
		case PiecePriorityNext:
			return "N"
		case PiecePriorityNormal:
			return "."
		case PiecePriorityReadahead:
			return "R"
		case PiecePriorityNow:
			return "!"
		default:
			return ""
		}
	}()
func (t *Torrent) writeStatus(w io.Writer) {
	fmt.Fprintf(w, "Infohash: %s\n", t.infoHash.HexString())
	fmt.Fprintf(w, "Metadata length: %d\n", t.metadataSize())
	fmt.Fprintf(w, "Metadata have: ")
	for _, h := range t.metadataCompletedChunks {
		fmt.Fprintf(w, "%c", func() rune {
			if h {
				return 'H'
			}
			return '.'
		}())
	}
	fmt.Fprintln(w)
	fmt.Fprintf(w, "Piece length: %s\n", func() string {
		if t.haveInfo() {
			return fmt.Sprint(t.usualPieceSize())
		}
		return "?"
	}())
	fmt.Fprintf(w, "Num Pieces: %d\n", t.numPieces())
	fmt.Fprint(w, "Piece States:")
	for _, psr := range t.pieceStateRuns() {
		w.Write([]byte(" "))
		w.Write([]byte(pieceStateRunStatusChars(psr)))
	}
	fmt.Fprintln(w)
	fmt.Fprintf(w, "Reader Pieces:")
	t.forReaderOffsetPieces(func(begin, end int) (again bool) {
		fmt.Fprintf(w, " %d:%d", begin, end)
		return true
	})
	fmt.Fprintln(w)
	fmt.Fprintf(w, "Trackers:\n")
	func() {
		tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
		fmt.Fprintf(tw, "    URL\tNext announce\tLast announce\n")
		for _, ta := range slices.Sort(slices.FromMapElems(t.trackerAnnouncers), func(l, r *trackerScraper) bool {
			return l.url < r.url
		}).([]*trackerScraper) {
			fmt.Fprintf(tw, "    %s\n", ta.statusLine())
		}
		tw.Flush()
	}()
	fmt.Fprintf(w, "DHT Announces: %d\n", t.numDHTAnnounces)
	fmt.Fprintf(w, "Pending peers: %d\n", len(t.peers))
	fmt.Fprintf(w, "Half open: %d\n", len(t.halfOpen))
	fmt.Fprintf(w, "Active peers: %d\n", len(t.conns))
	conns := t.connsAsSlice()
	slices.Sort(conns, worseConn)
	for i, c := range conns {
		fmt.Fprintf(w, "%2d. ", i+1)
		c.WriteStatus(w, t)
	}
}
func (t *Torrent) haveInfo() bool {
	return t.info != nil
}

// Returns a run-time generated MetaInfo that includes the info bytes and
// announce-list as currently known to the client.
func (t *Torrent) newMetaInfo() metainfo.MetaInfo {
	return metainfo.MetaInfo{
		CreationDate: time.Now().Unix(),
		Comment:      "dynamic metainfo from client",
		CreatedBy:    "go.torrent",
		AnnounceList: t.metainfo.UpvertedAnnounceList(),
		InfoBytes: func() []byte {
			if t.haveInfo() {
				return t.metadataBytes
			}
			return nil
		}(),
	}
}
func (t *Torrent) BytesMissing() int64 {
	t.mu().RLock()
	defer t.mu().RUnlock()
	return t.bytesMissingLocked()
}

func (t *Torrent) bytesMissingLocked() int64 {
	return t.bytesLeft()
}

func (t *Torrent) bytesLeft() (left int64) {
	bitmap.Flip(t.completedPieces, 0, t.numPieces()).IterTyped(func(piece int) bool {
		p := &t.pieces[piece]
		left += int64(p.length() - p.numDirtyBytes())
		return true
	})
	return
}
// Bytes left to give in tracker announces.
func (t *Torrent) bytesLeftAnnounce() uint64 {
	if t.haveInfo() {
		return uint64(t.bytesLeft())
	}
	return math.MaxUint64
}
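// For example (made-up numbers): a torrent with 256 MiB still missing
// announces Left=268435456, while a magnet-link download that hasn't
// resolved its info yet reports Left=math.MaxUint64, which trackers
// conventionally read as "amount left unknown".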
func (t *Torrent) piecePartiallyDownloaded(piece int) bool {
	if t.pieceComplete(piece) {
		return false
	}
	if t.pieceAllDirty(piece) {
		return false
	}
	return t.pieces[piece].hasDirtyChunks()
}

func (t *Torrent) usualPieceSize() int {
	return int(t.info.PieceLength)
}

func (t *Torrent) numPieces() int {
	return t.info.NumPieces()
}

func (t *Torrent) numPiecesCompleted() (num int) {
	return t.completedPieces.Len()
}

func (t *Torrent) close() (err error) {
	t.closed.Set()
	if t.storage != nil {
		t.storageLock.Lock()
		t.storage.Close()
		t.storageLock.Unlock()
	}
	for conn := range t.conns {
		conn.Close()
	}
	t.cl.event.Broadcast()
	t.pieceStateChanges.Close()
	t.updateWantPeersEvent()
	return
}
func (t *Torrent) requestOffset(r request) int64 {
	return torrentRequestOffset(*t.length, int64(t.usualPieceSize()), r)
}

// Return the request that would include the given offset into the torrent
// data. Returns !ok if there is no such request.
func (t *Torrent) offsetRequest(off int64) (req request, ok bool) {
	return torrentOffsetRequest(*t.length, t.info.PieceLength, int64(t.chunkSize), off)
}

func (t *Torrent) writeChunk(piece int, begin int64, data []byte) (err error) {
	tr := perf.NewTimer()
	n, err := t.pieces[piece].Storage().WriteAt(data, begin)
	if err == nil && n != len(data) {
		err = io.ErrShortWrite
	}
	if err == nil {
		tr.Mark("write chunk")
	}
	return
}

func (t *Torrent) bitfield() (bf []bool) {
	bf = make([]bool, t.numPieces())
	t.completedPieces.IterTyped(func(piece int) (again bool) {
		bf[piece] = true
		return true
	})
	return
}

func (t *Torrent) pieceNumChunks(piece int) int {
	return int((t.pieceLength(piece) + t.chunkSize - 1) / t.chunkSize)
}

func (t *Torrent) pendAllChunkSpecs(pieceIndex int) {
	t.pieces[pieceIndex].dirtyChunks.Clear()
}
type Peer struct {
	Id     [20]byte
	IP     net.IP
	Port   int
	Source peerSource
	// Peer is known to support encryption.
	SupportsEncryption bool
}
func (t *Torrent) pieceLength(piece int) pp.Integer {
	if piece == t.numPieces()-1 {
		ret := pp.Integer(*t.length % t.info.PieceLength)
		if ret != 0 {
			return ret
		}
	}
	return pp.Integer(t.info.PieceLength)
}
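// Worked example (made-up numbers): a 2500000-byte torrent with
// 262144-byte (256 KiB) pieces has 10 pieces, and the last one holds the
// remainder:
//
//	2500000 % 262144 == 140704 // pieceLength(9)
//
// When the total length is an exact multiple of the piece size, the
// remainder is 0 and the full piece length is returned instead.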
func (t *Torrent) hashPiece(piece int) (ret metainfo.Hash) {
	hash := pieceHash.New()
	p := &t.pieces[piece]
	p.waitNoPendingWrites()
	ip := t.info.Piece(piece)
	pl := ip.Length()
	n, err := io.Copy(hash, io.NewSectionReader(t.pieces[piece].Storage(), 0, pl))
	if n == pl {
		missinggo.CopyExact(&ret, hash.Sum(nil))
		return
	}
	if err != io.ErrUnexpectedEOF && !os.IsNotExist(err) {
		log.Printf("unexpected error hashing piece with %T: %s", t.storage.TorrentImpl, err)
	}
	return
}

func (t *Torrent) haveAnyPieces() bool {
	for i := range t.pieces {
		if t.pieceComplete(i) {
			return true
		}
	}
	return false
}

func (t *Torrent) havePiece(index int) bool {
	return t.haveInfo() && t.pieceComplete(index)
}

func (t *Torrent) haveChunk(r request) (ret bool) {
	// defer func() {
	// 	log.Println("have chunk", r, ret)
	// }()
	if !t.haveInfo() {
		return false
	}
	if t.pieceComplete(int(r.Index)) {
		return true
	}
	p := &t.pieces[r.Index]
	return !p.pendingChunk(r.chunkSpec, t.chunkSize)
}

func chunkIndex(cs chunkSpec, chunkSize pp.Integer) int {
	return int(cs.Begin / chunkSize)
}

func (t *Torrent) wantPiece(r request) bool {
	if !t.wantPieceIndex(int(r.Index)) {
		return false
	}
	if t.pieces[r.Index].pendingChunk(r.chunkSpec, t.chunkSize) {
		return true
	}
	// TODO: What about pieces that were wanted, but aren't now, and aren't
	// completed either? That used to be done here.
	return false
}
func (t *Torrent) wantPieceIndex(index int) bool {
	if !t.haveInfo() {
		return false
	}
	if index < 0 || index >= t.numPieces() {
		return false
	}
	p := &t.pieces[index]
	if p.queuedForHash() {
		return false
	}
	if p.hashing {
		return false
	}
	if t.pieceComplete(index) {
		return false
	}
	if t.pendingPieces.Contains(index) {
		return true
	}
	return !t.forReaderOffsetPieces(func(begin, end int) bool {
		return index < begin || index >= end
	})
}

// The worst connection is one that hasn't been sent, or hasn't sent,
// anything useful for the longest. A bad connection is one that usually
// sends us unwanted pieces, or has been in the worse half of the established
// connections for more than a minute.
func (t *Torrent) worstBadConn() *connection {
	wcs := worseConnSlice{t.unclosedConnsAsSlice()}
	heap.Init(&wcs)
	for wcs.Len() != 0 {
		c := heap.Pop(&wcs).(*connection)
		if c.UnwantedChunksReceived >= 6 && c.UnwantedChunksReceived > c.UsefulChunksReceived {
			return c
		}
		if wcs.Len() >= (t.maxEstablishedConns+1)/2 {
			// Give connections 1 minute to prove themselves.
			if time.Since(c.completedHandshake) > time.Minute {
				return c
			}
		}
	}
	return nil
}
type PieceStateChange struct {
	Index int
	PieceState
}

func (t *Torrent) publishPieceChange(piece int) {
	cur := t.pieceState(piece)
	p := &t.pieces[piece]
	if cur != p.publicPieceState {
		p.publicPieceState = cur
		t.pieceStateChanges.Publish(PieceStateChange{
			piece,
			cur,
		})
	}
}

func (t *Torrent) pieceNumPendingChunks(piece int) int {
	if t.pieceComplete(piece) {
		return 0
	}
	return t.pieceNumChunks(piece) - t.pieces[piece].numDirtyChunks()
}

func (t *Torrent) pieceAllDirty(piece int) bool {
	return t.pieces[piece].dirtyChunks.Len() == t.pieceNumChunks(piece)
}

func (t *Torrent) readersChanged() {
	t.updateReaderPieces()
	t.updateAllPiecePriorities()
}

func (t *Torrent) updateReaderPieces() {
	t.readerNowPieces, t.readerReadaheadPieces = t.readerPiecePriorities()
}

func (t *Torrent) readerPosChanged(from, to pieceRange) {
	if from == to {
		return
	}
	t.updateReaderPieces()
	// Order the ranges, high and low.
	l, h := from, to
	if l.begin > h.begin {
		l, h = h, l
	}
	if l.end < h.begin {
		// Two distinct ranges.
		t.updatePiecePriorities(l.begin, l.end)
		t.updatePiecePriorities(h.begin, h.end)
	} else {
		// Ranges overlap.
		end := l.end
		if h.end > end {
			end = h.end
		}
		t.updatePiecePriorities(l.begin, end)
	}
}

func (t *Torrent) maybeNewConns() {
	// Tickle the accept routine.
	t.cl.event.Broadcast()
	t.openNewConns()
}

func (t *Torrent) piecePriorityChanged(piece int) {
	for c := range t.conns {
		if c.updatePiecePriority(piece) {
			c.updateRequests()
		}
	}
	t.maybeNewConns()
	t.publishPieceChange(piece)
}

func (t *Torrent) updatePiecePriority(piece int) {
	p := &t.pieces[piece]
	newPrio := t.piecePriorityUncached(piece)
	if newPrio == p.priority {
		return
	}
	p.priority = newPrio
	t.piecePriorityChanged(piece)
}

func (t *Torrent) updateAllPiecePriorities() {
	t.updatePiecePriorities(0, len(t.pieces))
}

// Update all piece priorities in one hit. This function should have the same
// output as updatePiecePriority, but across all pieces.
func (t *Torrent) updatePiecePriorities(begin, end int) {
	for i := begin; i < end; i++ {
		t.updatePiecePriority(i)
	}
}
// Returns the range of pieces [begin, end) that contains the extent of bytes.
func (t *Torrent) byteRegionPieces(off, size int64) (begin, end int) {
	if off >= *t.length {
		return
	}
	if off < 0 {
		size += off
		off = 0
	}
	if size <= 0 {
		return
	}
	begin = int(off / t.info.PieceLength)
	end = int((off + size + t.info.PieceLength - 1) / t.info.PieceLength)
	if end > t.info.NumPieces() {
		end = t.info.NumPieces()
	}
	return
}
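// Worked example (made-up numbers): with 262144-byte pieces, a 1000-byte
// extent at offset 300000 lies entirely within piece 1:
//
//	begin := 300000 / 262144                     // == 1
//	end := (300000 + 1000 + 262144 - 1) / 262144 // == 2, exclusive bound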
// Returns true if all iterations complete without breaking. Returns the read
// regions for all readers. The reader regions should not be merged as some
// callers depend on this method to enumerate readers.
func (t *Torrent) forReaderOffsetPieces(f func(begin, end int) (more bool)) (all bool) {
	for r := range t.readers {
		p := r.pieces
		if p.begin >= p.end {
			continue
		}
		if !f(p.begin, p.end) {
			return false
		}
	}
	return true
}

func (t *Torrent) piecePriority(piece int) piecePriority {
	if !t.haveInfo() {
		return PiecePriorityNone
	}
	return t.pieces[piece].priority
}

func (t *Torrent) piecePriorityUncached(piece int) piecePriority {
	if t.pieceComplete(piece) {
		return PiecePriorityNone
	}
	if t.readerNowPieces.Contains(piece) {
		return PiecePriorityNow
	}
	// if t.readerNowPieces.Contains(piece - 1) {
	// 	return PiecePriorityNext
	// }
	if t.readerReadaheadPieces.Contains(piece) {
		return PiecePriorityReadahead
	}
	if t.pendingPieces.Contains(piece) {
		return PiecePriorityNormal
	}
	return PiecePriorityNone
}
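// The checks above form a ladder from most to least urgent. Illustrative
// summary (ignoring the commented-out Next case):
//
//	complete                       -> PiecePriorityNone
//	at a reader's current position -> PiecePriorityNow
//	within a reader's readahead    -> PiecePriorityReadahead
//	explicitly pended for download -> PiecePriorityNormal
//	otherwise                      -> PiecePriorityNone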
func (t *Torrent) pendPiece(piece int) {
	if t.pendingPieces.Contains(piece) {
		return
	}
	if t.havePiece(piece) {
		return
	}
	t.pendingPieces.Set(piece, PiecePriorityNormal.BitmapPriority())
	t.updatePiecePriority(piece)
}

func (t *Torrent) unpendPieces(unpend bitmap.Bitmap) {
	unpend.IterTyped(func(piece int) (more bool) {
		t.pendingPieces.Remove(piece)
		t.updatePiecePriority(piece)
		return true
	})
}

func (t *Torrent) pendPieceRange(begin, end int) {
	for i := begin; i < end; i++ {
		t.pendPiece(i)
	}
}

func (t *Torrent) unpendPieceRange(begin, end int) {
	var bm bitmap.Bitmap
	bm.AddRange(begin, end)
	t.unpendPieces(bm)
}

func (t *Torrent) pendRequest(req request) {
	ci := chunkIndex(req.chunkSpec, t.chunkSize)
	t.pieces[req.Index].pendChunkIndex(ci)
}

func (t *Torrent) pieceCompletionChanged(piece int) {
	t.cl.event.Broadcast()
	if t.pieceComplete(piece) {
		t.onPieceCompleted(piece)
	} else {
		t.onIncompletePiece(piece)
	}
	t.updatePiecePriority(piece)
}

func (t *Torrent) openNewConns() {
	t.cl.openNewConns(t)
}

func (t *Torrent) getConnPieceInclination() []int {
	_ret := t.connPieceInclinationPool.Get()
	if _ret == nil {
		pieceInclinationsNew.Add(1)
		return rand.Perm(t.numPieces())
	}
	pieceInclinationsReused.Add(1)
	return *_ret.(*[]int)
}

func (t *Torrent) putPieceInclination(pi []int) {
	t.connPieceInclinationPool.Put(&pi)
	pieceInclinationsPut.Add(1)
}

func (t *Torrent) updatePieceCompletion(piece int) {
	pcu := t.pieceCompleteUncached(piece)
	p := &t.pieces[piece]
	changed := t.completedPieces.Get(piece) != pcu.Complete || p.storageCompletionOk != pcu.Ok
	p.storageCompletionOk = pcu.Ok
	t.completedPieces.Set(piece, pcu.Complete)
	if changed {
		t.pieceCompletionChanged(piece)
	}
}

// Non-blocking read. Client lock is not required.
func (t *Torrent) readAt(b []byte, off int64) (n int, err error) {
	p := &t.pieces[off/t.info.PieceLength]
	p.waitNoPendingWrites()
	return p.Storage().ReadAt(b, off-p.Info().Offset())
}
func (t *Torrent) updateAllPieceCompletions() {
	for i := range iter.N(t.numPieces()) {
		t.updatePieceCompletion(i)
	}
}

// Returns an error if the metadata was completed, but couldn't be set for
// some reason. Blame it on the last peer to contribute.
func (t *Torrent) maybeCompleteMetadata() error {
	if t.haveInfo() {
		return nil
	}
	if !t.haveAllMetadataPieces() {
		// Don't have enough metadata pieces.
		return nil
	}
	err := t.setInfoBytes(t.metadataBytes)
	if err != nil {
		t.invalidateMetadata()
		return fmt.Errorf("error setting info bytes: %s", err)
	}
	if t.cl.config.Debug {
		log.Printf("%s: got metadata from peers", t)
	}
	return nil
}

func (t *Torrent) readerPieces() (ret bitmap.Bitmap) {
	t.forReaderOffsetPieces(func(begin, end int) bool {
		ret.AddRange(begin, end)
		return true
	})
	return
}

func (t *Torrent) readerPiecePriorities() (now, readahead bitmap.Bitmap) {
	t.forReaderOffsetPieces(func(begin, end int) bool {
		now.Add(begin)
		readahead.AddRange(begin+1, end)
		return true
	})
	return
}
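// For a reader whose position spans pieces [begin, end), the piece at the
// read head gets "now" priority and the rest of the window is readahead.
// E.g. a reader covering pieces [4, 8) yields now = {4} and
// readahead = {5, 6, 7}.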
func (t *Torrent) needData() bool {
	if t.closed.IsSet() {
		return false
	}
	if !t.haveInfo() {
		return true
	}
	if t.pendingPieces.Len() != 0 {
		return true
	}
	// Read as "not all complete".
	return !t.readerPieces().IterTyped(func(piece int) bool {
		return t.pieceComplete(piece)
	})
}

func appendMissingStrings(old, new []string) (ret []string) {
	ret = old
new:
	for _, n := range new {
		for _, o := range old {
			if o == n {
				continue new
			}
		}
		ret = append(ret, n)
	}
	return
}

func appendMissingTrackerTiers(existing [][]string, minNumTiers int) (ret [][]string) {
	ret = existing
	for minNumTiers > len(ret) {
		ret = append(ret, nil)
	}
	return
}

func (t *Torrent) addTrackers(announceList [][]string) {
	fullAnnounceList := &t.metainfo.AnnounceList
	t.metainfo.AnnounceList = appendMissingTrackerTiers(*fullAnnounceList, len(announceList))
	for tierIndex, trackerURLs := range announceList {
		(*fullAnnounceList)[tierIndex] = appendMissingStrings((*fullAnnounceList)[tierIndex], trackerURLs)
	}
	t.startMissingTrackerScrapers()
	t.updateWantPeersEvent()
}

// Don't call this before the info is available.
func (t *Torrent) bytesCompleted() int64 {
	if !t.haveInfo() {
		return 0
	}
	return t.info.TotalLength() - t.bytesLeft()
}

func (t *Torrent) SetInfoBytes(b []byte) (err error) {
	t.cl.mu.Lock()
	defer t.cl.mu.Unlock()
	return t.setInfoBytes(b)
}

// Returns true if connection is removed from torrent.Conns.
func (t *Torrent) deleteConnection(c *connection) (ret bool) {
	_, ret = t.conns[c]
	delete(t.conns, c)
	return
}

func (t *Torrent) dropConnection(c *connection) {
	t.cl.event.Broadcast()
	c.Close()
	if t.deleteConnection(c) {
		t.openNewConns()
	}
}

func (t *Torrent) wantPeers() bool {
	if t.closed.IsSet() {
		return false
	}
	if len(t.peers) > t.cl.config.TorrentPeersLowWater {
		return false
	}
	return t.needData() || t.seeding()
}

func (t *Torrent) updateWantPeersEvent() {
	if t.wantPeers() {
		t.wantPeersEvent.Set()
	} else {
		t.wantPeersEvent.Clear()
	}
}

// Returns whether the client should make effort to seed the torrent.
func (t *Torrent) seeding() bool {
	cl := t.cl
	if t.closed.IsSet() {
		return false
	}
	if cl.config.NoUpload {
		return false
	}
	if !cl.config.Seed {
		return false
	}
	if cl.config.DisableAggressiveUpload && t.needData() {
		return false
	}
	return true
}

func (t *Torrent) startScrapingTracker(url string) {
	if url == "" {
		return
	}
	if _, ok := t.trackerAnnouncers[url]; ok {
		return
	}
	newAnnouncer := &trackerScraper{
		url: url,
		t:   t,
	}
	if t.trackerAnnouncers == nil {
		t.trackerAnnouncers = make(map[string]*trackerScraper)
	}
	t.trackerAnnouncers[url] = newAnnouncer
	go newAnnouncer.Run()
}

// Adds and starts tracker scrapers for tracker URLs that aren't already
// running.
func (t *Torrent) startMissingTrackerScrapers() {
	if t.cl.config.DisableTrackers {
		return
	}
	t.startScrapingTracker(t.metainfo.Announce)
	for _, tier := range t.metainfo.AnnounceList {
		for _, url := range tier {
			t.startScrapingTracker(url)
		}
	}
}

// Returns an AnnounceRequest with fields filled out to defaults and current
// values.
func (t *Torrent) announceRequest() tracker.AnnounceRequest {
	return tracker.AnnounceRequest{
		Event:    tracker.None,
		Port:     uint16(t.cl.incomingPeerPort()),
		PeerId:   t.cl.peerID,
		InfoHash: t.infoHash,
		Left:     t.bytesLeftAnnounce(),
	}
}
// Adds peers revealed in an announce until the announce ends, or we have
// enough peers.
func (t *Torrent) consumeDHTAnnounce(pvs <-chan dht.PeersValues) {
	cl := t.cl
	// Count all the unique addresses we got during this announce.
	allAddrs := make(map[string]struct{})
	for {
		select {
		case v, ok := <-pvs:
			if !ok {
				return
			}
			addPeers := make([]Peer, 0, len(v.Peers))
			for _, cp := range v.Peers {
				if cp.Port == 0 {
					// Can't do anything with this.
					continue
				}
				addPeers = append(addPeers, Peer{
					IP:     cp.IP[:],
					Port:   cp.Port,
					Source: peerSourceDHTGetPeers,
				})
				key := (&net.UDPAddr{
					IP:   cp.IP[:],
					Port: cp.Port,
				}).String()
				allAddrs[key] = struct{}{}
			}
			t.addPeers(addPeers)
			numPeers := len(t.peers)
			if numPeers >= cl.config.TorrentPeersHighWater {
				return
			}
		case <-t.closed.LockedChan(&cl.mu):
			return
		}
	}
}

func (t *Torrent) announceDHT(impliedPort bool) (err error) {
	cl := t.cl
	ps, err := cl.dHT.Announce(t.infoHash, cl.incomingPeerPort(), impliedPort)
	if err != nil {
		return
	}
	t.consumeDHTAnnounce(ps.Peers)
	ps.Close()
	return
}

func (t *Torrent) dhtAnnouncer() {
	cl := t.cl
	for {
		select {
		case <-t.wantPeersEvent.LockedChan(&cl.mu):
		case <-t.closed.LockedChan(&cl.mu):
			return
		}
		err := t.announceDHT(true)
		func() {
			cl.mu.Lock()
			defer cl.mu.Unlock()
			if err == nil {
				t.numDHTAnnounces++
			} else {
				log.Printf("error announcing %q to DHT: %s", t, err)
			}
		}()
		select {
		case <-t.closed.LockedChan(&cl.mu):
			return
		case <-time.After(5 * time.Minute):
		}
	}
}
func (t *Torrent) addPeers(peers []Peer) {
	for _, p := range peers {
		if t.cl.badPeerIPPort(p.IP, p.Port) {
			continue
		}
		t.addPeer(p)
	}
}

func (t *Torrent) Stats() TorrentStats {
	t.cl.mu.Lock()
	defer t.cl.mu.Unlock()
	t.stats.ActivePeers = len(t.conns)
	t.stats.HalfOpenPeers = len(t.halfOpen)
	t.stats.PendingPeers = len(t.peers)
	t.stats.TotalPeers = t.numTotalPeers()
	return t.stats
}

// The total number of peers in the torrent.
func (t *Torrent) numTotalPeers() int {
	peers := make(map[string]struct{})
	for conn := range t.conns {
		ra := conn.conn.RemoteAddr()
		if ra == nil {
			// It's been closed and doesn't support RemoteAddr.
			continue
		}
		peers[ra.String()] = struct{}{}
	}
	for addr := range t.halfOpen {
		peers[addr] = struct{}{}
	}
	for _, peer := range t.peers {
		peers[fmt.Sprintf("%s:%d", peer.IP, peer.Port)] = struct{}{}
	}
	return len(peers)
}
// Returns true if the connection is added.
func (t *Torrent) addConnection(c *connection, outgoing bool) bool {
	if t.cl.closed.IsSet() {
		return false
	}
	if !t.wantConns() {
		return false
	}
	for c0 := range t.conns {
		if c.PeerID == c0.PeerID {
			// Already connected to a client with that ID.
			duplicateClientConns.Add(1)
			lower := string(t.cl.peerID[:]) < string(c.PeerID[:])
			// Retain the connection initiated from the lower peer ID to the
			// higher.
			if outgoing == lower {
				// Close the other one.
				c0.Close()
				// TODO: Is it safe to delete from the map while we're
				// iterating over it?
				t.deleteConnection(c0)
			} else {
				// Abandon this one.
				return false
			}
		}
	}
	if len(t.conns) >= t.maxEstablishedConns {
		c := t.worstBadConn()
		if c == nil {
			return false
		}
		if t.cl.config.Debug && missinggo.CryHeard() {
			log.Printf("%s: dropping connection to make room for new one:\n    %s", t, c)
		}
		c.Close()
		t.deleteConnection(c)
	}
	if len(t.conns) >= t.maxEstablishedConns {
		panic("too many connections")
	}
	if c.t != nil {
		panic("connection already associated with a torrent")
	}
	// Reconcile bytes transferred before connection was associated with a
	// torrent.
	t.stats.wroteBytes(c.stats.BytesWritten)
	t.stats.readBytes(c.stats.BytesRead)
	c.t = t
	t.conns[c] = struct{}{}
	return true
}
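// Why "outgoing == lower" keeps exactly one of a duplicate pair: both sides
// evaluate the same peer-ID comparison. For IDs A < B, A's outgoing
// connection passes the test at A (outgoing && lower), and that same
// connection, incoming at B, passes there too (!outgoing && !lower); the
// reverse pairing fails the test at both ends and is dropped.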
func (t *Torrent) wantConns() bool {
	if !t.networkingEnabled {
		return false
	}
	if t.closed.IsSet() {
		return false
	}
	if !t.seeding() && !t.needData() {
		return false
	}
	if len(t.conns) < t.maxEstablishedConns {
		return true
	}
	return t.worstBadConn() != nil
}

func (t *Torrent) SetMaxEstablishedConns(max int) (oldMax int) {
	t.cl.mu.Lock()
	defer t.cl.mu.Unlock()
	oldMax = t.maxEstablishedConns
	t.maxEstablishedConns = max
	wcs := slices.HeapInterface(slices.FromMapKeys(t.conns), worseConn)
	for len(t.conns) > t.maxEstablishedConns && wcs.Len() > 0 {
		t.dropConnection(wcs.Pop().(*connection))
	}
	t.openNewConns()
	return oldMax
}

func (t *Torrent) mu() missinggo.RWLocker {
	return &t.cl.mu
}
func (t *Torrent) pieceHashed(piece int, correct bool) {
	if t.closed.IsSet() {
		return
	}
	p := &t.pieces[piece]
	touchers := t.reapPieceTouchers(piece)
	if p.everHashed {
		// Don't score the first time a piece is hashed, it could be an
		// initial check.
		if correct {
			pieceHashedCorrect.Add(1)
		} else {
			log.Printf("%s: piece %d (%s) failed hash: %d connections contributed", t, piece, p.hash, len(touchers))
			pieceHashedNotCorrect.Add(1)
		}
	}
	p.everHashed = true
	if correct {
		for _, c := range touchers {
			c.goodPiecesDirtied++
		}
		err := p.Storage().MarkComplete()
		if err != nil {
			log.Printf("%T: error marking piece complete %d: %s", t.storage, piece, err)
		}
		t.updatePieceCompletion(piece)
	} else {
		if len(touchers) != 0 {
			for _, c := range touchers {
				// Y u do dis peer?!
				c.badPiecesDirtied++
			}
			slices.Sort(touchers, connLessTrusted)
			if t.cl.config.Debug {
				log.Printf("dropping first corresponding conn from trust: %v", func() (ret []int) {
					for _, c := range touchers {
						ret = append(ret, c.netGoodPiecesDirtied())
					}
					return
				}())
			}
			c := touchers[0]
			t.cl.banPeerIP(missinggo.AddrIP(c.remoteAddr()))
			t.dropConnection(c)
		}
		t.onIncompletePiece(piece)
	}
}

func (t *Torrent) cancelRequestsForPiece(piece int) {
	// TODO: Make faster
	for cn := range t.conns {
		cn.updateRequests()
	}
}
func (t *Torrent) onPieceCompleted(piece int) {
	t.pendingPieces.Remove(piece)
	t.pendAllChunkSpecs(piece)
	t.cancelRequestsForPiece(piece)
	for conn := range t.conns {
		conn.Have(piece)
	}
}

func (t *Torrent) onIncompletePiece(piece int) {
	if t.pieceAllDirty(piece) {
		t.pendAllChunkSpecs(piece)
	}
	if !t.wantPieceIndex(piece) {
		return
	}
	// We could drop any connections here that we've told we have a piece
	// that we don't. But there's a test failure, and it seems clients don't
	// care if you request pieces that you already claim to have. Pruning bad
	// connections might just remove any connections that aren't treating us
	// favourably anyway.

	// for c := range t.conns {
	// 	if c.sentHave(piece) {
	// 		c.Close()
	// 	}
	// }
	for conn := range t.conns {
		if conn.PeerHasPiece(piece) {
			conn.updateRequests()
		}
	}
}

func (t *Torrent) verifyPiece(piece int) {
	cl := t.cl
	cl.mu.Lock()
	defer cl.mu.Unlock()
	p := &t.pieces[piece]
	defer func() {
		cl.event.Broadcast()
	}()
	for p.hashing || t.storage == nil {
		cl.event.Wait()
	}
	if !p.t.piecesQueuedForHash.Remove(piece) {
		panic("piece was not queued")
	}
	if t.closed.IsSet() || t.pieceComplete(piece) {
		t.updatePiecePriority(piece)
		return
	}
	p.hashing = true
	t.publishPieceChange(piece)
	t.storageLock.RLock()
	cl.mu.Unlock()
	sum := t.hashPiece(piece)
	t.storageLock.RUnlock()
	cl.mu.Lock()
	p.hashing = false
	t.pieceHashed(piece, sum == p.hash)
	t.publishPieceChange(piece)
}
// Return the connections that touched a piece, and clear the entries while
// doing so.
func (t *Torrent) reapPieceTouchers(piece int) (ret []*connection) {
	for c := range t.conns {
		if _, ok := c.peerTouchedPieces[piece]; ok {
			ret = append(ret, c)
			delete(c.peerTouchedPieces, piece)
		}
	}
	return
}

func (t *Torrent) connsAsSlice() (ret []*connection) {
	for c := range t.conns {
		ret = append(ret, c)
	}
	return
}

// Currently doesn't really queue, but should in the future.
func (t *Torrent) queuePieceCheck(pieceIndex int) {
	piece := &t.pieces[pieceIndex]
	if piece.queuedForHash() {
		return
	}
	t.piecesQueuedForHash.Add(pieceIndex)
	t.publishPieceChange(pieceIndex)
	go t.verifyPiece(pieceIndex)
}

func (t *Torrent) VerifyData() {
	for i := range iter.N(t.NumPieces()) {
		t.Piece(i).VerifyData()
	}
}
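// Hypothetical usage sketch (client code, not part of this file): force a
// recheck of a torrent whose data directory was restored out-of-band.
//
//	tor, _ := client.AddMagnet("magnet:?xt=urn:btih:...")
//	<-tor.GotInfo()
//	tor.VerifyData() // queues a hash check for every piece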