19 "github.com/anacrolix/dht"
20 "github.com/anacrolix/missinggo"
21 "github.com/anacrolix/missinggo/bitmap"
22 "github.com/anacrolix/missinggo/perf"
23 "github.com/anacrolix/missinggo/pubsub"
24 "github.com/anacrolix/missinggo/slices"
25 "github.com/bradfitz/iter"
27 "github.com/anacrolix/torrent/bencode"
28 "github.com/anacrolix/torrent/metainfo"
29 pp "github.com/anacrolix/torrent/peer_protocol"
30 "github.com/anacrolix/torrent/storage"
31 "github.com/anacrolix/torrent/tracker"
34 func (t *Torrent) chunkIndexSpec(chunkIndex, piece int) chunkSpec {
35 return chunkIndexSpec(chunkIndex, t.pieceLength(piece), t.chunkSize)
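// The conversion from a chunk index within a piece to a byte range is plain
// arithmetic. A minimal sketch of what the free chunkIndexSpec helper is
// assumed to do (illustrative only; the real helper lives elsewhere in the
// package):
//
//	func chunkIndexSpecSketch(index int, pieceLength, chunkSize pp.Integer) chunkSpec {
//		begin := pp.Integer(index) * chunkSize
//		length := chunkSize
//		if begin+length > pieceLength {
//			// The final chunk of a piece may be shorter than chunkSize.
//			length = pieceLength - begin
//		}
//		return chunkSpec{Begin: begin, Length: length}
//	}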
38 type peersKey struct {
43 // Maintains the state of a torrent within a Client.
47 networkingEnabled bool
50 closed missinggo.Event
51 infoHash metainfo.Hash
53 // Values are the piece indices that changed.
54 pieceStateChanges *pubsub.PubSub
55 // The size of chunks to request from peers over the wire. This is
56 // normally 16KiB by convention these days.
59 // Total length of the torrent in bytes. Stored because it's not O(1) to
60 // get this from the info dict.
63 // The storage to open when the info dict becomes available.
64 storageOpener *storage.Client
65 // Storage for torrent data.
66 storage *storage.Torrent
67 // Read-locked for using storage, and write-locked for Closing.
68 storageLock sync.RWMutex
70 metainfo metainfo.MetaInfo
72 // The info dict. nil if we don't have it (yet).
75 // Active peer connections, running message stream loops.
76 conns map[*connection]struct{}
77 maxEstablishedConns int
78 // Set of addrs to which we're attempting to connect. Connections are
79 // half-open until all handshakes are completed.
80 halfOpen map[string]Peer
81 fastestConn *connection
83 // Reserve of peers to connect to. A peer can be both here and in the
84 // active connections if we're told about the peer after connecting with
85 // them. That encourages us to reconnect to peers that are well known in
87 peers map[peersKey]Peer
88 wantPeersEvent missinggo.Event
89 // An announcer for each tracker URL.
90 trackerAnnouncers map[string]*trackerScraper
91 // How many times we've initiated a DHT announce. TODO: Move into stats.
94 // Name used if the info name isn't available. Should be cleared when the
95 // Info does become available.
98 // The bencoded bytes of the info dict. This is actively manipulated if
99 // the info bytes aren't initially available, and we try to fetch them
102 // Each element corresponds to a 16KiB metadata piece. If true, we have
103 // received that piece.
104 metadataCompletedChunks []bool
105 metadataChanged sync.Cond
107 // Set when .Info is obtained.
108 gotMetainfo missinggo.Event
110 readers map[*Reader]struct{}
111 readerNowPieces bitmap.Bitmap
112 readerReadaheadPieces bitmap.Bitmap
114 // The indexes of pieces we want with normal priority, that aren't
115 // currently available.
116 pendingPieces bitmap.Bitmap
117 // A cache of completed piece indices.
118 completedPieces bitmap.Bitmap
119 // Pieces that need to be hashed.
120 piecesQueuedForHash bitmap.Bitmap
122 // A pool of piece priorities []int for assignment to new connections.
123 // These "inclinations" are used to give connections preference for
125 connPieceInclinationPool sync.Pool
126 // Torrent-level statistics.
130 // Returns a channel that is closed when the Torrent is closed.
131 func (t *Torrent) Closed() <-chan struct{} {
132 return t.closed.LockedChan(&t.cl.mu)
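// A hypothetical caller can use the returned channel to tear down
// per-torrent goroutines, for example:
//
//	go func() {
//		<-t.Closed()
//		// The torrent was closed; stop any work being done on its behalf.
//	}()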
135 // KnownSwarm returns the known subset of the peers in the Torrent's swarm, including active,
136 // pending, and half-open peers.
137 func (t *Torrent) KnownSwarm() (ks []Peer) {
138 // Add pending peers to the list
139 for _, peer := range t.peers {
140 ks = append(ks, peer)
143 // Add half-open peers to the list
144 for _, peer := range t.halfOpen {
145 ks = append(ks, peer)
148 // Add active peers to the list
149 for conn := range t.conns {
150 host, portString, err := net.SplitHostPort(conn.remoteAddr().String())
155 ip := net.ParseIP(host)
156 port, err := strconv.Atoi(portString)
161 ks = append(ks, Peer{
165 Source: conn.Discovery,
166 // > If the connection is encrypted, that's certainly enough to set SupportsEncryption.
167 // > But if we're not connected to them with an encrypted connection, I couldn't say
168 // > what's appropriate. We can carry forward the SupportsEncryption value as we
169 // > received it from trackers/DHT/PEX, or just use the encryption state for the
170 // > connection. It's probably easiest to do the latter for now.
171 // https://github.com/anacrolix/torrent/pull/188
172 SupportsEncryption: conn.headerEncrypted,
179 func (t *Torrent) setChunkSize(size pp.Integer) {
181 t.chunkPool = &sync.Pool{
182 New: func() interface{} {
183 b := make([]byte, size)
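// The pool above amortises allocation of chunk-sized receive buffers. A
// self-contained sketch of the same pattern, with illustrative names that
// are not part of this file:
//
//	pool := &sync.Pool{New: func() interface{} {
//		b := make([]byte, 16<<10) // one wire chunk
//		return &b
//	}}
//	buf := pool.Get().(*[]byte) // borrow a buffer
//	// ... read a chunk off the wire into *buf, then write it to storage ...
//	pool.Put(buf) // return it for reuse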
189 func (t *Torrent) setDisplayName(dn string) {
196 func (t *Torrent) pieceComplete(piece int) bool {
197 return t.completedPieces.Get(piece)
200 func (t *Torrent) pieceCompleteUncached(piece int) storage.Completion {
201 return t.pieces[piece].Storage().Completion()
204 // Reports whether there's already a connection to that address.
205 func (t *Torrent) addrActive(addr string) bool {
206 if _, ok := t.halfOpen[addr]; ok {
209 for c := range t.conns {
214 if ra.String() == addr {
221 func (t *Torrent) unclosedConnsAsSlice() (ret []*connection) {
222 ret = make([]*connection, 0, len(t.conns))
223 for c := range t.conns {
224 if !c.closed.IsSet() {
231 func (t *Torrent) addPeer(p Peer) {
234 if len(t.peers) >= cl.config.TorrentPeersHighWater {
237 key := peersKey{string(p.IP), p.Port}
238 if _, ok := t.peers[key]; ok {
242 peersAddedBySource.Add(string(p.Source), 1)
247 func (t *Torrent) invalidateMetadata() {
248 for i := range t.metadataCompletedChunks {
249 t.metadataCompletedChunks[i] = false
254 func (t *Torrent) saveMetadataPiece(index int, data []byte) {
258 if index >= len(t.metadataCompletedChunks) {
259 log.Printf("%s: ignoring metadata piece %d", t, index)
262 copy(t.metadataBytes[(1<<14)*index:], data)
263 t.metadataCompletedChunks[index] = true
266 func (t *Torrent) metadataPieceCount() int {
267 return (len(t.metadataBytes) + (1 << 14) - 1) / (1 << 14)
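// This is a ceiling division by the 16KiB (1<<14) metadata piece size
// defined by BEP 9. For example, a 40000-byte info dict yields
// (40000 + 16383) / 16384 = 3 pieces: two full 16KiB pieces and a final
// piece of 7232 bytes.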
270 func (t *Torrent) haveMetadataPiece(piece int) bool {
272 return (1<<14)*piece < len(t.metadataBytes)
274 return piece < len(t.metadataCompletedChunks) && t.metadataCompletedChunks[piece]
278 func (t *Torrent) metadataSizeKnown() bool {
279 return t.metadataBytes != nil
282 func (t *Torrent) metadataSize() int {
283 return len(t.metadataBytes)
286 func infoPieceHashes(info *metainfo.Info) (ret []string) {
287 for i := 0; i < len(info.Pieces); i += sha1.Size {
288 ret = append(ret, string(info.Pieces[i:i+sha1.Size]))
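// info.Pieces is the concatenation of one 20-byte SHA-1 digest per piece, so
// its length is always a multiple of sha1.Size. A hedged sketch of the same
// slicing with a sanity check (illustrative only):
//
//	if len(info.Pieces)%sha1.Size != 0 {
//		// Malformed info dict; handle the error.
//	}
//	for i := 0; i < len(info.Pieces); i += sha1.Size {
//		digest := info.Pieces[i : i+sha1.Size]
//		_ = digest // the 20-byte hash for piece i/sha1.Size
//	}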
293 func (t *Torrent) makePieces() {
294 hashes := infoPieceHashes(t.info)
295 t.pieces = make([]Piece, len(hashes))
296 for i, hash := range hashes {
297 piece := &t.pieces[i]
300 piece.noPendingWrites.L = &piece.pendingWritesMutex
301 missinggo.CopyExact(piece.hash[:], hash)
305 // Called when metadata for a torrent becomes available.
306 func (t *Torrent) setInfoBytes(b []byte) error {
310 if metainfo.HashBytes(b) != t.infoHash {
311 return errors.New("info bytes have wrong hash")
313 var info metainfo.Info
314 err := bencode.Unmarshal(b, &info)
316 return fmt.Errorf("error unmarshalling info bytes: %s", err)
318 err = validateInfo(&info)
320 return fmt.Errorf("bad info: %s", err)
322 defer t.updateWantPeersEvent()
324 t.displayName = "" // Not needed once the info is available, and it saves a few bytes.
325 t.cl.event.Broadcast()
327 t.storage, err = t.storageOpener.OpenTorrent(t.info, t.infoHash)
329 return fmt.Errorf("error opening torrent storage: %s", err)
332 for _, f := range t.info.UpvertedFiles() {
336 t.metadataCompletedChunks = nil
338 for conn := range t.conns {
339 if err := conn.setNumPieces(t.numPieces()); err != nil {
340 log.Printf("closing connection: %s", err)
344 for i := range t.pieces {
345 t.updatePieceCompletion(i)
347 if !p.storageCompletionOk {
348 // log.Printf("piece %s completion unknown, queueing check", p)
355 func (t *Torrent) haveAllMetadataPieces() bool {
359 if t.metadataCompletedChunks == nil {
362 for _, have := range t.metadataCompletedChunks {
370 // TODO: Propagate errors to disconnect peer.
371 func (t *Torrent) setMetadataSize(bytes int64) (err error) {
373 // We already know the correct metadata size.
376 if bytes <= 0 || bytes > 10000000 { // 10MB, an arbitrary sanity cap.
377 return errors.New("bad size")
379 if t.metadataBytes != nil && len(t.metadataBytes) == int(bytes) {
382 t.metadataBytes = make([]byte, bytes)
383 t.metadataCompletedChunks = make([]bool, (bytes+(1<<14)-1)/(1<<14))
384 t.metadataChanged.Broadcast()
385 for c := range t.conns {
386 c.requestPendingMetadata()
391 // The current working name for the torrent. Either the name in the info dict,
392 // or a display name supplied e.g. via the dn value in a magnet link, or "".
393 func (t *Torrent) name() string {
400 func (t *Torrent) pieceState(index int) (ret PieceState) {
401 p := &t.pieces[index]
402 ret.Priority = t.piecePriority(index)
403 if t.pieceComplete(index) {
406 if p.queuedForHash() || p.hashing {
409 if !ret.Complete && t.piecePartiallyDownloaded(index) {
415 func (t *Torrent) metadataPieceSize(piece int) int {
416 return metadataPieceSize(len(t.metadataBytes), piece)
419 func (t *Torrent) newMetadataExtensionMessage(c *connection, msgType int, piece int, data []byte) pp.Message {
425 d["total_size"] = len(t.metadataBytes)
427 p, err := bencode.Marshal(d)
433 ExtendedID: c.PeerExtensionIDs["ut_metadata"],
434 ExtendedPayload: append(p, data...),
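// Per BEP 9, the extension payload is a bencoded dictionary, optionally
// followed by raw metadata bytes (for data messages). A sketch of the
// dictionary assumed to be built above, with illustrative values:
//
//	d := map[string]int{
//		"msg_type": msgType, // 0 = request, 1 = data, 2 = reject
//		"piece":    piece,
//	}
//	if data != nil {
//		d["total_size"] = len(t.metadataBytes)
//	}
//	payload, _ := bencode.Marshal(d) // then append(payload, data...)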
438 func (t *Torrent) pieceStateRuns() (ret []PieceStateRun) {
439 rle := missinggo.NewRunLengthEncoder(func(el interface{}, count uint64) {
440 ret = append(ret, PieceStateRun{
441 PieceState: el.(PieceState),
445 for index := range t.pieces {
446 rle.Append(t.pieceState(index), 1)
452 // Produces a small string representing a PieceStateRun.
453 func pieceStateRunStatusChars(psr PieceStateRun) (ret string) {
454 ret = fmt.Sprintf("%d", psr.Length)
455 ret += func() string {
456 switch psr.Priority {
457 case PiecePriorityNext:
459 case PiecePriorityNormal:
461 case PiecePriorityReadahead:
463 case PiecePriorityNow:
481 func (t *Torrent) writeStatus(w io.Writer) {
482 fmt.Fprintf(w, "Infohash: %s\n", t.infoHash.HexString())
483 fmt.Fprintf(w, "Metadata length: %d\n", t.metadataSize())
485 fmt.Fprintf(w, "Metadata have: ")
486 for _, h := range t.metadataCompletedChunks {
487 fmt.Fprintf(w, "%c", func() rune {
497 fmt.Fprintf(w, "Piece length: %s\n", func() string {
499 return fmt.Sprint(t.usualPieceSize())
505 fmt.Fprintf(w, "Num Pieces: %d\n", t.numPieces())
506 fmt.Fprint(w, "Piece States:")
507 for _, psr := range t.pieceStateRuns() {
509 w.Write([]byte(pieceStateRunStatusChars(psr)))
513 fmt.Fprintf(w, "Reader Pieces:")
514 t.forReaderOffsetPieces(func(begin, end int) (again bool) {
515 fmt.Fprintf(w, " %d:%d", begin, end)
520 fmt.Fprintf(w, "Trackers:\n")
522 tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
523 fmt.Fprintf(tw, " URL\tNext announce\tLast announce\n")
524 for _, ta := range slices.Sort(slices.FromMapElems(t.trackerAnnouncers), func(l, r *trackerScraper) bool {
526 }).([]*trackerScraper) {
527 fmt.Fprintf(tw, " %s\n", ta.statusLine())
532 fmt.Fprintf(w, "DHT Announces: %d\n", t.numDHTAnnounces)
534 fmt.Fprintf(w, "Pending peers: %d\n", len(t.peers))
535 fmt.Fprintf(w, "Half open: %d\n", len(t.halfOpen))
536 fmt.Fprintf(w, "Active peers: %d\n", len(t.conns))
537 conns := t.connsAsSlice()
538 slices.Sort(conns, worseConn)
539 for i, c := range conns {
540 fmt.Fprintf(w, "%2d. ", i+1)
545 func (t *Torrent) haveInfo() bool {
549 // Returns a run-time generated MetaInfo that includes the info bytes and
550 // announce-list as currently known to the client.
551 func (t *Torrent) newMetaInfo() metainfo.MetaInfo {
552 return metainfo.MetaInfo{
553 CreationDate: time.Now().Unix(),
554 Comment: "dynamic metainfo from client",
555 CreatedBy: "go.torrent",
556 AnnounceList: t.metainfo.UpvertedAnnounceList(),
557 InfoBytes: func() []byte {
559 return t.metadataBytes
567 func (t *Torrent) BytesMissing() int64 {
569 defer t.mu().RUnlock()
570 return t.bytesMissingLocked()
573 func (t *Torrent) bytesMissingLocked() int64 {
577 func (t *Torrent) bytesLeft() (left int64) {
578 bitmap.Flip(t.completedPieces, 0, t.numPieces()).IterTyped(func(piece int) bool {
579 p := &t.pieces[piece]
580 left += int64(p.length() - p.numDirtyBytes())
586 // Bytes left to give in tracker announces.
587 func (t *Torrent) bytesLeftAnnounce() uint64 {
589 return uint64(t.bytesLeft())
591 return math.MaxUint64
595 func (t *Torrent) piecePartiallyDownloaded(piece int) bool {
596 if t.pieceComplete(piece) {
599 if t.pieceAllDirty(piece) {
602 return t.pieces[piece].hasDirtyChunks()
605 func (t *Torrent) usualPieceSize() int {
606 return int(t.info.PieceLength)
609 func (t *Torrent) numPieces() int {
610 return t.info.NumPieces()
613 func (t *Torrent) numPiecesCompleted() (num int) {
614 return t.completedPieces.Len()
617 func (t *Torrent) close() (err error) {
619 if t.storage != nil {
622 t.storageLock.Unlock()
624 for conn := range t.conns {
627 t.cl.event.Broadcast()
628 t.pieceStateChanges.Close()
629 t.updateWantPeersEvent()
633 func (t *Torrent) requestOffset(r request) int64 {
634 return torrentRequestOffset(t.length, int64(t.usualPieceSize()), r)
637 // Return the request that would include the given offset into the torrent
638 // data. Returns !ok if there is no such request.
639 func (t *Torrent) offsetRequest(off int64) (req request, ok bool) {
640 return torrentOffsetRequest(t.length, t.info.PieceLength, int64(t.chunkSize), off)
643 func (t *Torrent) writeChunk(piece int, begin int64, data []byte) (err error) {
644 tr := perf.NewTimer()
646 n, err := t.pieces[piece].Storage().WriteAt(data, begin)
647 if err == nil && n != len(data) {
648 err = io.ErrShortWrite
651 tr.Mark("write chunk")
656 func (t *Torrent) bitfield() (bf []bool) {
657 bf = make([]bool, t.numPieces())
658 t.completedPieces.IterTyped(func(piece int) (again bool) {
665 func (t *Torrent) pieceNumChunks(piece int) int {
666 return int((t.pieceLength(piece) + t.chunkSize - 1) / t.chunkSize)
669 func (t *Torrent) pendAllChunkSpecs(pieceIndex int) {
670 t.pieces[pieceIndex].dirtyChunks.Clear()
678 // Peer is known to support encryption.
679 SupportsEncryption bool
682 func (t *Torrent) pieceLength(piece int) pp.Integer {
683 if piece == t.numPieces()-1 {
684 ret := pp.Integer(t.length % t.info.PieceLength)
689 return pp.Integer(t.info.PieceLength)
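// Only the final piece can be short. For example, a torrent of 2,500,000
// bytes with a 1 MiB (1,048,576-byte) piece length has 3 pieces: two full
// pieces and a final piece of 2,500,000 % 1,048,576 = 402,848 bytes. When the
// total length is an exact multiple of the piece length, the remainder is 0
// and the full piece length applies to the last piece as well.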
692 func (t *Torrent) hashPiece(piece int) (ret metainfo.Hash) {
693 hash := pieceHash.New()
694 p := &t.pieces[piece]
695 p.waitNoPendingWrites()
696 ip := t.info.Piece(piece)
698 n, err := io.Copy(hash, io.NewSectionReader(t.pieces[piece].Storage(), 0, pl))
700 missinggo.CopyExact(&ret, hash.Sum(nil))
703 if err != io.ErrUnexpectedEOF && !os.IsNotExist(err) {
704 log.Printf("unexpected error hashing piece with %T: %s", t.storage.TorrentImpl, err)
709 func (t *Torrent) haveAnyPieces() bool {
710 for i := range t.pieces {
711 if t.pieceComplete(i) {
718 func (t *Torrent) havePiece(index int) bool {
719 return t.haveInfo() && t.pieceComplete(index)
722 func (t *Torrent) haveChunk(r request) (ret bool) {
724 // log.Println("have chunk", r, ret)
729 if t.pieceComplete(int(r.Index)) {
732 p := &t.pieces[r.Index]
733 return !p.pendingChunk(r.chunkSpec, t.chunkSize)
736 func chunkIndex(cs chunkSpec, chunkSize pp.Integer) int {
737 return int(cs.Begin / chunkSize)
740 func (t *Torrent) wantPiece(r request) bool {
741 if !t.wantPieceIndex(int(r.Index)) {
744 if t.pieces[r.Index].pendingChunk(r.chunkSpec, t.chunkSize) {
747 // TODO: What about pieces that were wanted, but aren't now, and aren't
748 // completed either? That used to be done here.
752 func (t *Torrent) wantPieceIndex(index int) bool {
756 if index < 0 || index >= t.numPieces() {
759 p := &t.pieces[index]
760 if p.queuedForHash() {
766 if t.pieceComplete(index) {
769 if t.pendingPieces.Contains(index) {
772 return !t.forReaderOffsetPieces(func(begin, end int) bool {
773 return index < begin || index >= end
777 // The worst connection is one that hasn't been sent, or hasn't sent, anything
778 // useful for the longest time. A bad connection is one that usually sends us
779 // unwanted pieces, or has been in the worse half of the established connections for more
781 func (t *Torrent) worstBadConn() *connection {
782 wcs := worseConnSlice{t.unclosedConnsAsSlice()}
785 c := heap.Pop(&wcs).(*connection)
786 if c.UnwantedChunksReceived >= 6 && c.UnwantedChunksReceived > c.UsefulChunksReceived {
789 if wcs.Len() >= (t.maxEstablishedConns+1)/2 {
790 // Give connections 1 minute to prove themselves.
791 if time.Since(c.completedHandshake) > time.Minute {
799 type PieceStateChange struct {
804 func (t *Torrent) publishPieceChange(piece int) {
805 cur := t.pieceState(piece)
806 p := &t.pieces[piece]
807 if cur != p.publicPieceState {
808 p.publicPieceState = cur
809 t.pieceStateChanges.Publish(PieceStateChange{
816 func (t *Torrent) pieceNumPendingChunks(piece int) int {
817 if t.pieceComplete(piece) {
820 return t.pieceNumChunks(piece) - t.pieces[piece].numDirtyChunks()
823 func (t *Torrent) pieceAllDirty(piece int) bool {
824 return t.pieces[piece].dirtyChunks.Len() == t.pieceNumChunks(piece)
827 func (t *Torrent) readersChanged() {
828 t.updateReaderPieces()
829 t.updateAllPiecePriorities()
832 func (t *Torrent) updateReaderPieces() {
833 t.readerNowPieces, t.readerReadaheadPieces = t.readerPiecePriorities()
836 func (t *Torrent) readerPosChanged(from, to pieceRange) {
840 t.updateReaderPieces()
841 // Order the ranges, high and low.
843 if l.begin > h.begin {
847 // Two distinct ranges.
848 t.updatePiecePriorities(l.begin, l.end)
849 t.updatePiecePriorities(h.begin, h.end)
856 t.updatePiecePriorities(l.begin, end)
860 func (t *Torrent) maybeNewConns() {
861 // Tickle the accept routine.
862 t.cl.event.Broadcast()
866 func (t *Torrent) piecePriorityChanged(piece int) {
867 for c := range t.conns {
868 if c.updatePiecePriority(piece) {
873 t.publishPieceChange(piece)
876 func (t *Torrent) updatePiecePriority(piece int) {
877 p := &t.pieces[piece]
878 newPrio := t.piecePriorityUncached(piece)
879 if newPrio == p.priority {
883 t.piecePriorityChanged(piece)
886 func (t *Torrent) updateAllPiecePriorities() {
887 t.updatePiecePriorities(0, len(t.pieces))
890 // Update all piece priorities in one hit. This function should have the same
891 // output as updatePiecePriority, but across all pieces.
892 func (t *Torrent) updatePiecePriorities(begin, end int) {
893 for i := begin; i < end; i++ {
894 t.updatePiecePriority(i)
898 // Returns the range of pieces [begin, end) that contains the extent of bytes.
899 func (t *Torrent) byteRegionPieces(off, size int64) (begin, end int) {
910 begin = int(off / t.info.PieceLength)
911 end = int((off + size + t.info.PieceLength - 1) / t.info.PieceLength)
912 if end > t.info.NumPieces() {
913 end = t.info.NumPieces()
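// Worked example: with 256KiB (262,144-byte) pieces, the byte extent
// off=300000, size=200000 maps to pieces [1, 2):
// begin = 300000/262144 = 1, end = (300000+200000+262143)/262144 = 2.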
918 // Calls f with the piece region for each reader. Returns true if all
919 // iterations complete without breaking. The regions are not merged, as some
920 // callers depend on this method to enumerate readers one at a time.
921 func (t *Torrent) forReaderOffsetPieces(f func(begin, end int) (more bool)) (all bool) {
922 for r := range t.readers {
924 if p.begin >= p.end {
927 if !f(p.begin, p.end) {
934 func (t *Torrent) piecePriority(piece int) piecePriority {
936 return PiecePriorityNone
938 return t.pieces[piece].priority
941 func (t *Torrent) piecePriorityUncached(piece int) piecePriority {
942 if t.pieceComplete(piece) {
943 return PiecePriorityNone
945 if t.readerNowPieces.Contains(piece) {
946 return PiecePriorityNow
948 // if t.readerNowPieces.Contains(piece - 1) {
949 // return PiecePriorityNext
951 if t.readerReadaheadPieces.Contains(piece) {
952 return PiecePriorityReadahead
954 if t.pendingPieces.Contains(piece) {
955 return PiecePriorityNormal
957 return PiecePriorityNone
960 func (t *Torrent) pendPiece(piece int) {
961 if t.pendingPieces.Contains(piece) {
964 if t.havePiece(piece) {
967 t.pendingPieces.Add(piece)
968 t.updatePiecePriority(piece)
971 func (t *Torrent) unpendPieces(unpend bitmap.Bitmap) {
972 t.pendingPieces.Sub(unpend)
973 unpend.IterTyped(func(piece int) (again bool) {
974 t.updatePiecePriority(piece)
979 func (t *Torrent) pendPieceRange(begin, end int) {
980 for i := begin; i < end; i++ {
985 func (t *Torrent) unpendPieceRange(begin, end int) {
987 bm.AddRange(begin, end)
991 func (t *Torrent) pendRequest(req request) {
992 ci := chunkIndex(req.chunkSpec, t.chunkSize)
993 t.pieces[req.Index].pendChunkIndex(ci)
996 func (t *Torrent) pieceCompletionChanged(piece int) {
997 t.cl.event.Broadcast()
998 if t.pieceComplete(piece) {
999 t.onPieceCompleted(piece)
1001 t.onIncompletePiece(piece)
1003 t.updatePiecePriority(piece)
1006 func (t *Torrent) openNewConns() {
1007 t.cl.openNewConns(t)
1010 func (t *Torrent) getConnPieceInclination() []int {
1011 _ret := t.connPieceInclinationPool.Get()
1013 pieceInclinationsNew.Add(1)
1014 return rand.Perm(t.numPieces())
1016 pieceInclinationsReused.Add(1)
1017 return *_ret.(*[]int)
1020 func (t *Torrent) putPieceInclination(pi []int) {
1021 t.connPieceInclinationPool.Put(&pi)
1022 pieceInclinationsPut.Add(1)
1025 func (t *Torrent) updatePieceCompletion(piece int) {
1026 pcu := t.pieceCompleteUncached(piece)
1027 p := &t.pieces[piece]
1028 changed := t.completedPieces.Get(piece) != pcu.Complete || p.storageCompletionOk != pcu.Ok
1029 p.storageCompletionOk = pcu.Ok
1030 t.completedPieces.Set(piece, pcu.Complete)
1032 t.pieceCompletionChanged(piece)
1036 // Non-blocking read. Client lock is not required.
1037 func (t *Torrent) readAt(b []byte, off int64) (n int, err error) {
1038 p := &t.pieces[off/t.info.PieceLength]
1039 p.waitNoPendingWrites()
1040 return p.Storage().ReadAt(b, off-p.Info().Offset())
1043 func (t *Torrent) updateAllPieceCompletions() {
1044 for i := range iter.N(t.numPieces()) {
1045 t.updatePieceCompletion(i)
1049 // Returns an error if the metadata was completed, but couldn't be set for
1050 // some reason. Blame it on the last peer to contribute.
1051 func (t *Torrent) maybeCompleteMetadata() error {
1056 if !t.haveAllMetadataPieces() {
1057 // Don't have enough metadata pieces.
1060 err := t.setInfoBytes(t.metadataBytes)
1062 t.invalidateMetadata()
1063 return fmt.Errorf("error setting info bytes: %s", err)
1065 if t.cl.config.Debug {
1066 log.Printf("%s: got metadata from peers", t)
1071 func (t *Torrent) readerPieces() (ret bitmap.Bitmap) {
1072 t.forReaderOffsetPieces(func(begin, end int) bool {
1073 ret.AddRange(begin, end)
1079 func (t *Torrent) readerPiecePriorities() (now, readahead bitmap.Bitmap) {
1080 t.forReaderOffsetPieces(func(begin, end int) bool {
1083 readahead.AddRange(begin+1, end)
1090 func (t *Torrent) needData() bool {
1091 if t.closed.IsSet() {
1097 if t.pendingPieces.Len() != 0 {
1100 // Read as "not all complete".
1101 return !t.readerPieces().IterTyped(func(piece int) bool {
1102 return t.pieceComplete(piece)
1106 func appendMissingStrings(old, new []string) (ret []string) {
1109 for _, n := range new {
1110 for _, o := range old {
1115 ret = append(ret, n)
1120 func appendMissingTrackerTiers(existing [][]string, minNumTiers int) (ret [][]string) {
1122 for minNumTiers > len(ret) {
1123 ret = append(ret, nil)
1128 func (t *Torrent) addTrackers(announceList [][]string) {
1129 fullAnnounceList := &t.metainfo.AnnounceList
1130 t.metainfo.AnnounceList = appendMissingTrackerTiers(*fullAnnounceList, len(announceList))
1131 for tierIndex, trackerURLs := range announceList {
1132 (*fullAnnounceList)[tierIndex] = appendMissingStrings((*fullAnnounceList)[tierIndex], trackerURLs)
1134 t.startMissingTrackerScrapers()
1135 t.updateWantPeersEvent()
1138 // Don't call this before the info is available.
1139 func (t *Torrent) bytesCompleted() int64 {
1143 return t.info.TotalLength() - t.bytesLeft()
1146 func (t *Torrent) SetInfoBytes(b []byte) (err error) {
1148 defer t.cl.mu.Unlock()
1149 return t.setInfoBytes(b)
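// An illustrative (hypothetical) use: supplying the info dict to a torrent
// added by magnet link, using bytes taken from a .torrent file on disk:
//
//	mi, err := metainfo.LoadFromFile("example.torrent")
//	if err != nil {
//		// handle the error
//	}
//	if err := t.SetInfoBytes(mi.InfoBytes); err != nil {
//		// infohash mismatch or an invalid info dict
//	}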
1152 // Returns true if the connection was removed from t.conns.
1153 func (t *Torrent) deleteConnection(c *connection) (ret bool) {
1159 func (t *Torrent) dropConnection(c *connection) {
1160 t.cl.event.Broadcast()
1162 if t.deleteConnection(c) {
1167 func (t *Torrent) wantPeers() bool {
1168 if t.closed.IsSet() {
1171 if len(t.peers) > t.cl.config.TorrentPeersLowWater {
1174 return t.needData() || t.seeding()
1177 func (t *Torrent) updateWantPeersEvent() {
1179 t.wantPeersEvent.Set()
1181 t.wantPeersEvent.Clear()
1185 // Returns whether the client should make an effort to seed the torrent.
1186 func (t *Torrent) seeding() bool {
1188 if t.closed.IsSet() {
1191 if cl.config.NoUpload {
1194 if !cl.config.Seed {
1197 if cl.config.DisableAggressiveUpload && t.needData() {
1203 func (t *Torrent) startScrapingTracker(url string) {
1207 if _, ok := t.trackerAnnouncers[url]; ok {
1210 newAnnouncer := &trackerScraper{
1214 if t.trackerAnnouncers == nil {
1215 t.trackerAnnouncers = make(map[string]*trackerScraper)
1217 t.trackerAnnouncers[url] = newAnnouncer
1218 go newAnnouncer.Run()
1221 // Adds and starts tracker scrapers for tracker URLs that aren't already
1223 func (t *Torrent) startMissingTrackerScrapers() {
1224 if t.cl.config.DisableTrackers {
1227 t.startScrapingTracker(t.metainfo.Announce)
1228 for _, tier := range t.metainfo.AnnounceList {
1229 for _, url := range tier {
1230 t.startScrapingTracker(url)
1235 // Returns an AnnounceRequest with fields filled out to defaults and current
1237 func (t *Torrent) announceRequest() tracker.AnnounceRequest {
1238 return tracker.AnnounceRequest{
1239 Event: tracker.None,
1241 Port: uint16(t.cl.incomingPeerPort()),
1242 PeerId: t.cl.peerID,
1243 InfoHash: t.infoHash,
1244 Left: t.bytesLeftAnnounce(),
1248 // Adds peers revealed in an announce until the announce ends, or we have
1250 func (t *Torrent) consumeDHTAnnounce(pvs <-chan dht.PeersValues) {
1252 // Count all the unique addresses we got during this announce.
1253 allAddrs := make(map[string]struct{})
1256 case v, ok := <-pvs:
1260 addPeers := make([]Peer, 0, len(v.Peers))
1261 for _, cp := range v.Peers {
1263 // Can't do anything with this.
1266 addPeers = append(addPeers, Peer{
1269 Source: peerSourceDHTGetPeers,
1271 key := (&net.UDPAddr{
1275 allAddrs[key] = struct{}{}
1278 t.addPeers(addPeers)
1279 numPeers := len(t.peers)
1281 if numPeers >= cl.config.TorrentPeersHighWater {
1284 case <-t.closed.LockedChan(&cl.mu):
1290 func (t *Torrent) announceDHT(impliedPort bool) (err error) {
1292 ps, err := cl.dHT.Announce(t.infoHash, cl.incomingPeerPort(), impliedPort)
1296 t.consumeDHTAnnounce(ps.Peers)
1301 func (t *Torrent) dhtAnnouncer() {
1305 case <-t.wantPeersEvent.LockedChan(&cl.mu):
1306 case <-t.closed.LockedChan(&cl.mu):
1309 err := t.announceDHT(true)
1312 defer cl.mu.Unlock()
1316 log.Printf("error announcing %q to DHT: %s", t, err)
1320 case <-t.closed.LockedChan(&cl.mu):
1322 case <-time.After(5 * time.Minute):
1327 func (t *Torrent) addPeers(peers []Peer) {
1328 for _, p := range peers {
1329 if t.cl.badPeerIPPort(p.IP, p.Port) {
1336 func (t *Torrent) Stats() TorrentStats {
1338 defer t.cl.mu.Unlock()
1340 t.stats.ActivePeers = len(t.conns)
1341 t.stats.HalfOpenPeers = len(t.halfOpen)
1342 t.stats.PendingPeers = len(t.peers)
1343 t.stats.TotalPeers = t.numTotalPeers()
1348 // The total number of peers in the torrent.
1349 func (t *Torrent) numTotalPeers() int {
1350 peers := make(map[string]struct{})
1351 for conn := range t.conns {
1352 ra := conn.conn.RemoteAddr()
1354 // It's been closed and doesn't support RemoteAddr.
1357 peers[ra.String()] = struct{}{}
1359 for addr := range t.halfOpen {
1360 peers[addr] = struct{}{}
1362 for _, peer := range t.peers {
1363 peers[fmt.Sprintf("%s:%d", peer.IP, peer.Port)] = struct{}{}
1368 // Returns true if the connection is added.
1369 func (t *Torrent) addConnection(c *connection, outgoing bool) bool {
1370 if t.cl.closed.IsSet() {
1376 for c0 := range t.conns {
1377 if c.PeerID == c0.PeerID {
1378 // Already connected to a client with that ID.
1379 duplicateClientConns.Add(1)
1380 lower := string(t.cl.peerID[:]) < string(c.PeerID[:])
1381 // Retain the connection initiated from the lower peer ID to
1383 if outgoing == lower {
1384 // Close the other one.
1386 // TODO: Is it safe to delete from the map while we're
1387 // iterating over it?
1388 t.deleteConnection(c0)
1390 // Abandon this one.
1395 if len(t.conns) >= t.maxEstablishedConns {
1396 c := t.worstBadConn()
1400 if t.cl.config.Debug && missinggo.CryHeard() {
1401 log.Printf("%s: dropping connection to make room for new one:\n %s", t, c)
1404 t.deleteConnection(c)
1406 if len(t.conns) >= t.maxEstablishedConns {
1410 panic("connection already associated with a torrent")
1412 // Reconcile bytes transferred before connection was associated with a
1414 t.stats.wroteBytes(c.stats.BytesWritten)
1415 t.stats.readBytes(c.stats.BytesRead)
1417 t.conns[c] = struct{}{}
1421 func (t *Torrent) wantConns() bool {
1422 if !t.networkingEnabled {
1425 if t.closed.IsSet() {
1428 if !t.seeding() && !t.needData() {
1431 if len(t.conns) < t.maxEstablishedConns {
1434 return t.worstBadConn() != nil
1437 func (t *Torrent) SetMaxEstablishedConns(max int) (oldMax int) {
1439 defer t.cl.mu.Unlock()
1440 oldMax = t.maxEstablishedConns
1441 t.maxEstablishedConns = max
1442 wcs := slices.HeapInterface(slices.FromMapKeys(t.conns), worseConn)
1443 for len(t.conns) > t.maxEstablishedConns && wcs.Len() > 0 {
1444 t.dropConnection(wcs.Pop().(*connection))
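// The new limit takes effect immediately: the worst connections are dropped
// until the count fits. An illustrative (hypothetical) caller:
//
//	previous := t.SetMaxEstablishedConns(25) // shrink this torrent's footprint
//	_ = previous                             // e.g. keep it around to restore later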
1450 func (t *Torrent) mu() missinggo.RWLocker {
1454 func (t *Torrent) pieceHashed(piece int, correct bool) {
1455 if t.closed.IsSet() {
1458 p := &t.pieces[piece]
1459 touchers := t.reapPieceTouchers(piece)
1461 // Don't score the first time a piece is hashed, it could be an
1464 pieceHashedCorrect.Add(1)
1466 log.Printf("%s: piece %d (%s) failed hash: %d connections contributed", t, piece, p.hash, len(touchers))
1467 pieceHashedNotCorrect.Add(1)
1472 for _, c := range touchers {
1473 c.goodPiecesDirtied++
1475 err := p.Storage().MarkComplete()
1477 log.Printf("%T: error marking piece complete %d: %s", t.storage, piece, err)
1479 t.updatePieceCompletion(piece)
1481 if len(touchers) != 0 {
1482 for _, c := range touchers {
1483 // Count the bad piece against every peer that touched it.
1484 c.badPiecesDirtied++
1486 slices.Sort(touchers, connLessTrusted)
1487 if t.cl.config.Debug {
1488 log.Printf("dropping first corresponding conn from trust: %v", func() (ret []int) {
1489 for _, c := range touchers {
1490 ret = append(ret, c.netGoodPiecesDirtied())
1496 t.cl.banPeerIP(missinggo.AddrIP(c.remoteAddr()))
1499 t.onIncompletePiece(piece)
1503 func (t *Torrent) cancelRequestsForPiece(piece int) {
1504 // TODO: Make faster
1505 for cn := range t.conns {
1510 func (t *Torrent) onPieceCompleted(piece int) {
1511 t.pendingPieces.Remove(piece)
1512 t.pendAllChunkSpecs(piece)
1513 t.cancelRequestsForPiece(piece)
1514 for conn := range t.conns {
1519 func (t *Torrent) onIncompletePiece(piece int) {
1520 if t.pieceAllDirty(piece) {
1521 t.pendAllChunkSpecs(piece)
1523 if !t.wantPieceIndex(piece) {
1526 // We could drop any connections here that we've told we have a piece that
1527 // we don't actually have. But there's a test failure, and it seems clients don't care
1528 // if you request pieces that you already claim to have. Pruning bad
1529 // connections might just remove any connections that aren't treating us
1530 // favourably anyway.
1532 // for c := range t.conns {
1533 // if c.sentHave(piece) {
1537 for conn := range t.conns {
1538 if conn.PeerHasPiece(piece) {
1539 conn.updateRequests()
1544 func (t *Torrent) verifyPiece(piece int) {
1547 defer cl.mu.Unlock()
1548 p := &t.pieces[piece]
1551 cl.event.Broadcast()
1553 for p.hashing || t.storage == nil {
1556 if !p.t.piecesQueuedForHash.Remove(piece) {
1557 panic("piece was not queued")
1559 if t.closed.IsSet() || t.pieceComplete(piece) {
1560 t.updatePiecePriority(piece)
1564 t.publishPieceChange(piece)
1565 t.storageLock.RLock()
1567 sum := t.hashPiece(piece)
1568 t.storageLock.RUnlock()
1571 t.pieceHashed(piece, sum == p.hash)
1572 t.publishPieceChange(piece)
1575 // Return the connections that touched a piece, and clear the entry while
1577 func (t *Torrent) reapPieceTouchers(piece int) (ret []*connection) {
1578 for c := range t.conns {
1579 if _, ok := c.peerTouchedPieces[piece]; ok {
1580 ret = append(ret, c)
1581 delete(c.peerTouchedPieces, piece)
1587 func (t *Torrent) connsAsSlice() (ret []*connection) {
1588 for c := range t.conns {
1589 ret = append(ret, c)
1594 // Currently doesn't really queue, but should in the future.
1595 func (t *Torrent) queuePieceCheck(pieceIndex int) {
1596 piece := &t.pieces[pieceIndex]
1597 if piece.queuedForHash() {
1600 t.piecesQueuedForHash.Add(pieceIndex)
1601 t.publishPieceChange(pieceIndex)
1602 go t.verifyPiece(pieceIndex)
1605 func (t *Torrent) VerifyData() {
1606 for i := range iter.N(t.NumPieces()) {
1607 t.Piece(i).VerifyData()
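// An illustrative (hypothetical) caller, forcing a re-check of all on-disk
// data once the info (and therefore the piece hashes) is available:
//
//	<-t.GotInfo()  // piece hashes are needed before anything can be verified
//	t.VerifyData() // queues every piece for hashing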