17 "github.com/davecgh/go-spew/spew"
19 "github.com/anacrolix/dht/v2"
20 "github.com/anacrolix/log"
21 "github.com/anacrolix/missinggo"
22 "github.com/anacrolix/missinggo/bitmap"
23 "github.com/anacrolix/missinggo/perf"
24 "github.com/anacrolix/missinggo/prioritybitmap"
25 "github.com/anacrolix/missinggo/pubsub"
26 "github.com/anacrolix/missinggo/slices"
28 "github.com/anacrolix/torrent/bencode"
29 "github.com/anacrolix/torrent/metainfo"
30 pp "github.com/anacrolix/torrent/peer_protocol"
31 "github.com/anacrolix/torrent/storage"
32 "github.com/anacrolix/torrent/tracker"
35 func (t *Torrent) chunkIndexSpec(chunkIndex pp.Integer, piece pieceIndex) chunkSpec {
36 return chunkIndexSpec(chunkIndex, t.pieceLength(piece), t.chunkSize)
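// Illustrative sketch, not the actual package helper: the free chunkIndexSpec
// presumably maps a chunk index within a piece to a (Begin, Length) pair,
// truncating the final chunk at the piece boundary. Assuming chunkSpec has
// Begin and Length fields of type pp.Integer, roughly:
//
//	begin := chunkIndex * chunkSize
//	length := chunkSize
//	if begin+length > pieceLength {
//		length = pieceLength - begin
//	}
//	return chunkSpec{begin, length}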
39 // Maintains state of torrent within a Client.
41 // Torrent-level aggregate statistics. First in struct to ensure 64-bit
42 // alignment. See #262.
47 networkingEnabled bool
49 // Determines what chunks to request from peers. 1: Favour higher priority
50 // pieces with some fuzzing to reduce overlaps and wastage across
51 // connections. 2: The fastest connection downloads strictly in order of
52 priority, while all others adhere to their piece inclinations. 3:
53 // Requests are strictly by piece priority, and not duplicated until
54 // duplicateRequestTimeout is reached.
56 // How long to avoid duplicating a pending request.
57 duplicateRequestTimeout time.Duration
59 closed missinggo.Event
60 infoHash metainfo.Hash
62 // Values are the piece indices that changed.
63 pieceStateChanges *pubsub.PubSub
64 // The size of chunks to request from peers over the wire. This is
65 // normally 16KiB by convention these days.
68 // Total length of the torrent in bytes. Stored because it's not O(1) to
69 // get this from the info dict.
72 // The storage to open when the info dict becomes available.
73 storageOpener *storage.Client
74 // Storage for torrent data.
75 storage *storage.Torrent
76 // Read-locked for using storage, and write-locked for Closing.
77 storageLock sync.RWMutex
79 // TODO: Only announce stuff is used?
80 metainfo metainfo.MetaInfo
82 // The info dict. nil if we don't have it (yet).
86 // Active peer connections, running message stream loops. TODO: Make this
87 // open (not-closed) connections only.
88 conns map[*connection]struct{}
89 maxEstablishedConns int
90 // Set of addrs to which we're attempting to connect. Connections are
91 // half-open until all handshakes are completed.
92 halfOpen map[string]Peer
93 fastestConn *connection
95 // Reserve of peers to connect to. A peer can be both here and in the
96 active connections if we're told about the peer after connecting with
97 // them. That encourages us to reconnect to peers that are well known in
99 peers prioritizedPeers
100 wantPeersEvent missinggo.Event
101 // An announcer for each tracker URL.
102 trackerAnnouncers map[string]*trackerScraper
103 // How many times we've initiated a DHT announce. TODO: Move into stats.
106 // Name used if the info name isn't available. Should be cleared when the
107 // Info does become available.
111 // The bencoded bytes of the info dict. This is actively manipulated if
112 // the info bytes aren't initially available, and we try to fetch them
115 // Each element corresponds to one 16KiB metadata piece. If true, we have
116 // received that piece.
117 metadataCompletedChunks []bool
118 metadataChanged sync.Cond
120 // Set when .Info is obtained.
121 gotMetainfo missinggo.Event
123 readers map[*reader]struct{}
124 readerNowPieces bitmap.Bitmap
125 readerReadaheadPieces bitmap.Bitmap
127 // A cache of pieces we need to get. Calculated from various piece and
128 // file priorities and completion states elsewhere.
129 pendingPieces prioritybitmap.PriorityBitmap
130 // A cache of completed piece indices.
131 completedPieces bitmap.Bitmap
132 // Pieces that need to be hashed.
133 piecesQueuedForHash bitmap.Bitmap
134 activePieceHashes int
136 // A pool of piece priorities []int for assignment to new connections.
137 // These "inclinations" are used to give connections preference for
139 connPieceInclinationPool sync.Pool
141 // Count of each request across active connections.
142 pendingRequests map[request]int
143 // The last time we requested a chunk. Deleting the request from any
144 // connection will clear this value.
145 lastRequested map[request]*time.Timer
148 func (t *Torrent) tickleReaders() {
149 t.cl.event.Broadcast()
152 // Returns a channel that is closed when the Torrent is closed.
153 func (t *Torrent) Closed() <-chan struct{} {
154 return t.closed.LockedChan(t.cl.locker())
157 // KnownSwarm returns the known subset of the peers in the Torrent's swarm, including active,
158 // pending, and half-open peers.
159 func (t *Torrent) KnownSwarm() (ks []Peer) {
160 // Add pending peers to the list
161 t.peers.Each(func(peer Peer) {
162 ks = append(ks, peer)
165 // Add half-open peers to the list
166 for _, peer := range t.halfOpen {
167 ks = append(ks, peer)
170 // Add active peers to the list
171 for conn := range t.conns {
173 ks = append(ks, Peer{
175 IP: conn.remoteAddr.IP,
176 Port: int(conn.remoteAddr.Port),
177 Source: conn.Discovery,
178 // > If the connection is encrypted, that's certainly enough to set SupportsEncryption.
179 // > But if we're not connected to them with an encrypted connection, I couldn't say
180 // > what's appropriate. We can carry forward the SupportsEncryption value as we
181 // > received it from trackers/DHT/PEX, or just use the encryption state for the
182 // > connection. It's probably easiest to do the latter for now.
183 // https://github.com/anacrolix/torrent/pull/188
184 SupportsEncryption: conn.headerEncrypted,
191 func (t *Torrent) setChunkSize(size pp.Integer) {
193 t.chunkPool = &sync.Pool{
194 New: func() interface{} {
195 b := make([]byte, size)
201 func (t *Torrent) pieceComplete(piece pieceIndex) bool {
202 return t.completedPieces.Get(bitmap.BitIndex(piece))
205 func (t *Torrent) pieceCompleteUncached(piece pieceIndex) storage.Completion {
206 return t.pieces[piece].Storage().Completion()
209 // There's a connection to that address already.
210 func (t *Torrent) addrActive(addr string) bool {
211 if _, ok := t.halfOpen[addr]; ok {
214 for c := range t.conns {
216 if ra.String() == addr {
223 func (t *Torrent) unclosedConnsAsSlice() (ret []*connection) {
224 ret = make([]*connection, 0, len(t.conns))
225 for c := range t.conns {
226 if !c.closed.IsSet() {
233 func (t *Torrent) addPeer(p Peer) {
235 peersAddedBySource.Add(string(p.Source), 1)
236 if t.closed.IsSet() {
239 if cl.badPeerIPPort(p.IP, p.Port) {
240 torrent.Add("peers not added because of bad addr", 1)
244 torrent.Add("peers replaced", 1)
247 for t.peers.Len() > cl.config.TorrentPeersHighWater {
248 _, ok := t.peers.DeleteMin()
250 torrent.Add("excess reserve peers discarded", 1)
255 func (t *Torrent) invalidateMetadata() {
256 for i := range t.metadataCompletedChunks {
257 t.metadataCompletedChunks[i] = false
264 func (t *Torrent) saveMetadataPiece(index int, data []byte) {
268 if index >= len(t.metadataCompletedChunks) {
269 t.logger.Printf("%s: ignoring metadata piece %d", t, index)
272 copy(t.metadataBytes[(1<<14)*index:], data)
273 t.metadataCompletedChunks[index] = true
276 func (t *Torrent) metadataPieceCount() int {
277 return (len(t.metadataBytes) + (1 << 14) - 1) / (1 << 14)
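// Worked example (illustrative): the expression is a ceiling division by the
// 16KiB (1<<14) metadata piece size, so a 50000-byte info dict yields
// (50000 + 16383) / 16384 = 4 metadata pieces.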
280 func (t *Torrent) haveMetadataPiece(piece int) bool {
282 return (1<<14)*piece < len(t.metadataBytes)
284 return piece < len(t.metadataCompletedChunks) && t.metadataCompletedChunks[piece]
288 func (t *Torrent) metadataSize() int {
289 return len(t.metadataBytes)
292 func infoPieceHashes(info *metainfo.Info) (ret [][]byte) {
293 for i := 0; i < len(info.Pieces); i += sha1.Size {
294 ret = append(ret, info.Pieces[i:i+sha1.Size])
299 func (t *Torrent) makePieces() {
300 hashes := infoPieceHashes(t.info)
301 t.pieces = make([]Piece, len(hashes))
302 for i, hash := range hashes {
303 piece := &t.pieces[i]
305 piece.index = pieceIndex(i)
306 piece.noPendingWrites.L = &piece.pendingWritesMutex
307 piece.hash = (*metainfo.Hash)(unsafe.Pointer(&hash[0]))
309 beginFile := pieceFirstFileIndex(piece.torrentBeginOffset(), files)
310 endFile := pieceEndFileIndex(piece.torrentEndOffset(), files)
311 piece.files = files[beginFile:endFile]
315 // Returns the index of the first file containing the piece. files must be
316 // ordered by offset.
317 func pieceFirstFileIndex(pieceOffset int64, files []*File) int {
318 for i, f := range files {
319 if f.offset+f.length > pieceOffset {
326 // Returns the index after the last file containing the piece. files must be
327 // ordered by offset.
328 func pieceEndFileIndex(pieceEndOffset int64, files []*File) int {
329 for i, f := range files {
330 if f.offset+f.length >= pieceEndOffset {
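// Worked example for the two helpers above (illustrative, assuming the elided
// bodies return the matching index and one past it respectively): with files
// of lengths 300000 and 700000 bytes (offsets 0 and 300000) and a piece
// spanning bytes [262144, 524288), pieceFirstFileIndex yields 0 (the first
// file ends at 300000 > 262144) and pieceEndFileIndex yields 2 (the second
// file ends at 1000000 >= 524288), so makePieces sets piece.files = files[0:2].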
337 func (t *Torrent) cacheLength() {
339 for _, f := range t.info.UpvertedFiles() {
345 func (t *Torrent) setInfo(info *metainfo.Info) error {
346 if err := validateInfo(info); err != nil {
347 return fmt.Errorf("bad info: %s", err)
349 if t.storageOpener != nil {
351 t.storage, err = t.storageOpener.OpenTorrent(info, t.infoHash)
353 return fmt.Errorf("error opening torrent storage: %s", err)
359 t.displayName = "" // Save a few bytes lol.
366 func (t *Torrent) onSetInfo() {
367 for conn := range t.conns {
368 if err := conn.setNumPieces(t.numPieces()); err != nil {
369 t.logger.Printf("closing connection: %s", err)
373 for i := range t.pieces {
374 t.updatePieceCompletion(pieceIndex(i))
376 if !p.storageCompletionOk {
377 // t.logger.Printf("piece %s completion unknown, queueing check", p)
378 t.queuePieceCheck(pieceIndex(i))
381 t.cl.event.Broadcast()
383 t.updateWantPeersEvent()
384 t.pendingRequests = make(map[request]int)
385 t.lastRequested = make(map[request]*time.Timer)
386 t.tryCreateMorePieceHashers()
389 // Called when metadata for a torrent becomes available.
390 func (t *Torrent) setInfoBytes(b []byte) error {
391 if metainfo.HashBytes(b) != t.infoHash {
392 return errors.New("info bytes have wrong hash")
394 var info metainfo.Info
395 if err := bencode.Unmarshal(b, &info); err != nil {
396 return fmt.Errorf("error unmarshalling info bytes: %s", err)
398 if err := t.setInfo(&info); err != nil {
402 t.metadataCompletedChunks = nil
407 func (t *Torrent) haveAllMetadataPieces() bool {
411 if t.metadataCompletedChunks == nil {
414 for _, have := range t.metadataCompletedChunks {
422 // TODO: Propagate errors to disconnect peer.
423 func (t *Torrent) setMetadataSize(bytes int) (err error) {
425 // We already know the correct metadata size.
428 if bytes <= 0 || bytes > 10000000 { // 10MB, pulled from my ass.
429 return errors.New("bad size")
431 if t.metadataBytes != nil && len(t.metadataBytes) == int(bytes) {
434 t.metadataBytes = make([]byte, bytes)
435 t.metadataCompletedChunks = make([]bool, (bytes+(1<<14)-1)/(1<<14))
436 t.metadataChanged.Broadcast()
437 for c := range t.conns {
438 c.requestPendingMetadata()
443 // The current working name for the torrent. Either the name in the info dict,
444 // or a display name provided externally, such as by the dn value in a magnet link, or "".
445 func (t *Torrent) name() string {
447 defer t.nameMu.RUnlock()
454 func (t *Torrent) pieceState(index pieceIndex) (ret PieceState) {
455 p := &t.pieces[index]
456 ret.Priority = t.piecePriority(index)
457 ret.Completion = p.completion()
458 if p.queuedForHash() || p.hashing {
461 if !ret.Complete && t.piecePartiallyDownloaded(index) {
467 func (t *Torrent) metadataPieceSize(piece int) int {
468 return metadataPieceSize(len(t.metadataBytes), piece)
471 func (t *Torrent) newMetadataExtensionMessage(c *connection, msgType int, piece int, data []byte) pp.Message {
477 d["total_size"] = len(t.metadataBytes)
479 p := bencode.MustMarshal(d)
482 ExtendedID: c.PeerExtensionIDs[pp.ExtensionNameMetadata],
483 ExtendedPayload: append(p, data...),
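// Illustrative note: per BEP 9 the bencoded dictionary built above looks
// roughly like d8:msg_typei1e5:piecei0e10:total_sizei3425ee for a data
// message, with the raw metadata piece bytes appended after it; which keys
// are present depends on msgType.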
487 func (t *Torrent) pieceStateRuns() (ret []PieceStateRun) {
488 rle := missinggo.NewRunLengthEncoder(func(el interface{}, count uint64) {
489 ret = append(ret, PieceStateRun{
490 PieceState: el.(PieceState),
494 for index := range t.pieces {
495 rle.Append(t.pieceState(pieceIndex(index)), 1)
501 // Produces a small string representing a PieceStateRun.
502 func pieceStateRunStatusChars(psr PieceStateRun) (ret string) {
503 ret = fmt.Sprintf("%d", psr.Length)
504 ret += func() string {
505 switch psr.Priority {
506 case PiecePriorityNext:
508 case PiecePriorityNormal:
510 case PiecePriorityReadahead:
512 case PiecePriorityNow:
514 case PiecePriorityHigh:
535 func (t *Torrent) writeStatus(w io.Writer) {
536 fmt.Fprintf(w, "Infohash: %s\n", t.infoHash.HexString())
537 fmt.Fprintf(w, "Metadata length: %d\n", t.metadataSize())
539 fmt.Fprintf(w, "Metadata have: ")
540 for _, h := range t.metadataCompletedChunks {
541 fmt.Fprintf(w, "%c", func() rune {
551 fmt.Fprintf(w, "Piece length: %s\n", func() string {
553 return fmt.Sprint(t.usualPieceSize())
559 fmt.Fprintf(w, "Num Pieces: %d (%d completed)\n", t.numPieces(), t.numPiecesCompleted())
560 fmt.Fprint(w, "Piece States:")
561 for _, psr := range t.pieceStateRuns() {
563 w.Write([]byte(pieceStateRunStatusChars(psr)))
567 fmt.Fprintf(w, "Reader Pieces:")
568 t.forReaderOffsetPieces(func(begin, end pieceIndex) (again bool) {
569 fmt.Fprintf(w, " %d:%d", begin, end)
574 fmt.Fprintf(w, "Enabled trackers:\n")
576 tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
577 fmt.Fprintf(tw, " URL\tNext announce\tLast announce\n")
578 for _, ta := range slices.Sort(slices.FromMapElems(t.trackerAnnouncers), func(l, r *trackerScraper) bool {
581 var luns, runs url.URL = lu, ru
584 var ml missinggo.MultiLess
585 ml.StrictNext(luns.String() == runs.String(), luns.String() < runs.String())
586 ml.StrictNext(lu.String() == ru.String(), lu.String() < ru.String())
588 }).([]*trackerScraper) {
589 fmt.Fprintf(tw, " %s\n", ta.statusLine())
594 fmt.Fprintf(w, "DHT Announces: %d\n", t.numDHTAnnounces)
596 spew.NewDefaultConfig()
597 spew.Fdump(w, t.statsLocked())
599 conns := t.connsAsSlice()
600 slices.Sort(conns, worseConn)
601 for i, c := range conns {
602 fmt.Fprintf(w, "%2d. ", i+1)
607 func (t *Torrent) haveInfo() bool {
611 // Returns a run-time generated MetaInfo that includes the info bytes and
612 // announce-list as currently known to the client.
613 func (t *Torrent) newMetaInfo() metainfo.MetaInfo {
614 return metainfo.MetaInfo{
615 CreationDate: time.Now().Unix(),
616 Comment: "dynamic metainfo from client",
617 CreatedBy: "go.torrent",
618 AnnounceList: t.metainfo.UpvertedAnnounceList(),
619 InfoBytes: func() []byte {
621 return t.metadataBytes
629 func (t *Torrent) BytesMissing() int64 {
632 return t.bytesMissingLocked()
635 func (t *Torrent) bytesMissingLocked() int64 {
639 func (t *Torrent) bytesLeft() (left int64) {
640 bitmap.Flip(t.completedPieces, 0, bitmap.BitIndex(t.numPieces())).IterTyped(func(piece int) bool {
641 p := &t.pieces[piece]
642 left += int64(p.length() - p.numDirtyBytes())
648 // Bytes left to give in tracker announces.
649 func (t *Torrent) bytesLeftAnnounce() int64 {
657 func (t *Torrent) piecePartiallyDownloaded(piece pieceIndex) bool {
658 if t.pieceComplete(piece) {
661 if t.pieceAllDirty(piece) {
664 return t.pieces[piece].hasDirtyChunks()
667 func (t *Torrent) usualPieceSize() int {
668 return int(t.info.PieceLength)
671 func (t *Torrent) numPieces() pieceIndex {
672 return pieceIndex(t.info.NumPieces())
675 func (t *Torrent) numPiecesCompleted() (num int) {
676 return t.completedPieces.Len()
679 func (t *Torrent) close() (err error) {
682 if t.storage != nil {
685 t.storageLock.Unlock()
687 for conn := range t.conns {
690 t.cl.event.Broadcast()
691 t.pieceStateChanges.Close()
692 t.updateWantPeersEvent()
696 func (t *Torrent) requestOffset(r request) int64 {
697 return torrentRequestOffset(*t.length, int64(t.usualPieceSize()), r)
700 // Return the request that would include the given offset into the torrent
701 // data. Returns !ok if there is no such request.
702 func (t *Torrent) offsetRequest(off int64) (req request, ok bool) {
703 return torrentOffsetRequest(*t.length, t.info.PieceLength, int64(t.chunkSize), off)
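// Worked example (illustrative, assuming a 262144-byte piece length and a
// 16384-byte chunk size): offset 300000 falls in piece 300000/262144 = 1, and
// the chunk containing it begins at ((300000-262144)/16384)*16384 = 32768
// within that piece, so the returned request would cover
// {Index: 1, Begin: 32768, Length: 16384}.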
706 func (t *Torrent) writeChunk(piece int, begin int64, data []byte) (err error) {
707 defer perf.ScopeTimerErr(&err)()
708 n, err := t.pieces[piece].Storage().WriteAt(data, begin)
709 if err == nil && n != len(data) {
710 err = io.ErrShortWrite
715 func (t *Torrent) bitfield() (bf []bool) {
716 bf = make([]bool, t.numPieces())
717 t.completedPieces.IterTyped(func(piece int) (again bool) {
724 func (t *Torrent) pieceNumChunks(piece pieceIndex) pp.Integer {
725 return (t.pieceLength(piece) + t.chunkSize - 1) / t.chunkSize
728 func (t *Torrent) pendAllChunkSpecs(pieceIndex pieceIndex) {
729 t.pieces[pieceIndex].dirtyChunks.Clear()
732 func (t *Torrent) pieceLength(piece pieceIndex) pp.Integer {
733 if t.info.PieceLength == 0 {
734 // There will be no variance amongst pieces. Only pain.
737 if piece == t.numPieces()-1 {
738 ret := pp.Integer(*t.length % t.info.PieceLength)
743 return pp.Integer(t.info.PieceLength)
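// Worked example (illustrative): with a total length of 1000000 bytes and a
// piece length of 262144, the last piece (index 3) is 1000000 % 262144 =
// 213568 bytes long; every other piece is the full 262144. If the total
// length were an exact multiple of the piece length, the modulo above would
// be zero and the full piece length would apply to the last piece too.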
746 func (t *Torrent) hashPiece(piece pieceIndex) (ret metainfo.Hash) {
747 hash := pieceHash.New()
749 p.waitNoPendingWrites()
750 ip := t.info.Piece(int(piece))
752 n, err := io.Copy(hash, io.NewSectionReader(t.pieces[piece].Storage(), 0, pl))
754 missinggo.CopyExact(&ret, hash.Sum(nil))
757 if err != io.ErrUnexpectedEOF && !os.IsNotExist(err) {
758 t.logger.Printf("unexpected error hashing piece %d through %T: %s", piece, t.storage.TorrentImpl, err)
763 func (t *Torrent) haveAnyPieces() bool {
764 return t.completedPieces.Len() != 0
767 func (t *Torrent) haveAllPieces() bool {
771 return t.completedPieces.Len() == bitmap.BitIndex(t.numPieces())
774 func (t *Torrent) havePiece(index pieceIndex) bool {
775 return t.haveInfo() && t.pieceComplete(index)
778 func (t *Torrent) haveChunk(r request) (ret bool) {
780 // log.Println("have chunk", r, ret)
785 if t.pieceComplete(pieceIndex(r.Index)) {
788 p := &t.pieces[r.Index]
789 return !p.pendingChunk(r.chunkSpec, t.chunkSize)
792 func chunkIndex(cs chunkSpec, chunkSize pp.Integer) int {
793 return int(cs.Begin / chunkSize)
796 func (t *Torrent) wantPieceIndex(index pieceIndex) bool {
800 if index < 0 || index >= t.numPieces() {
803 p := &t.pieces[index]
804 if p.queuedForHash() {
810 if t.pieceComplete(index) {
813 if t.pendingPieces.Contains(bitmap.BitIndex(index)) {
816 // t.logger.Printf("piece %d not pending", index)
817 return !t.forReaderOffsetPieces(func(begin, end pieceIndex) bool {
818 return index < begin || index >= end
822 // The worst connection is one that hasn't sent or been sent anything useful
823 // for the longest. A bad connection is one that usually sends us unwanted
824 // pieces, or has been in the worse half of the established connections for more
826 func (t *Torrent) worstBadConn() *connection {
827 wcs := worseConnSlice{t.unclosedConnsAsSlice()}
830 c := heap.Pop(&wcs).(*connection)
831 if c.stats.ChunksReadWasted.Int64() >= 6 && c.stats.ChunksReadWasted.Int64() > c.stats.ChunksReadUseful.Int64() {
834 // If the connection is in the worst half of the established
835 // connection quota and is older than a minute.
836 if wcs.Len() >= (t.maxEstablishedConns+1)/2 {
837 // Give connections 1 minute to prove themselves.
838 if time.Since(c.completedHandshake) > time.Minute {
846 type PieceStateChange struct {
851 func (t *Torrent) publishPieceChange(piece pieceIndex) {
852 t.cl._mu.Defer(func() {
853 cur := t.pieceState(piece)
854 p := &t.pieces[piece]
855 if cur != p.publicPieceState {
856 p.publicPieceState = cur
857 t.pieceStateChanges.Publish(PieceStateChange{
865 func (t *Torrent) pieceNumPendingChunks(piece pieceIndex) pp.Integer {
866 if t.pieceComplete(piece) {
869 return t.pieceNumChunks(piece) - t.pieces[piece].numDirtyChunks()
872 func (t *Torrent) pieceAllDirty(piece pieceIndex) bool {
873 return t.pieces[piece].dirtyChunks.Len() == int(t.pieceNumChunks(piece))
876 func (t *Torrent) readersChanged() {
877 t.updateReaderPieces()
878 t.updateAllPiecePriorities()
881 func (t *Torrent) updateReaderPieces() {
882 t.readerNowPieces, t.readerReadaheadPieces = t.readerPiecePriorities()
885 func (t *Torrent) readerPosChanged(from, to pieceRange) {
889 t.updateReaderPieces()
890 // Order the ranges, high and low.
892 if l.begin > h.begin {
896 // Two distinct ranges.
897 t.updatePiecePriorities(l.begin, l.end)
898 t.updatePiecePriorities(h.begin, h.end)
905 t.updatePiecePriorities(l.begin, end)
909 func (t *Torrent) maybeNewConns() {
910 // Tickle the accept routine.
911 t.cl.event.Broadcast()
915 func (t *Torrent) piecePriorityChanged(piece pieceIndex) {
916 // t.logger.Printf("piece %d priority changed", piece)
917 for c := range t.conns {
918 if c.updatePiecePriority(piece) {
919 // log.Print("conn piece priority changed")
924 t.publishPieceChange(piece)
927 func (t *Torrent) updatePiecePriority(piece pieceIndex) {
928 p := &t.pieces[piece]
929 newPrio := p.uncachedPriority()
930 // t.logger.Printf("torrent %p: piece %d: uncached priority: %v", t, piece, newPrio)
931 if newPrio == PiecePriorityNone {
932 if !t.pendingPieces.Remove(bitmap.BitIndex(piece)) {
936 if !t.pendingPieces.Set(bitmap.BitIndex(piece), newPrio.BitmapPriority()) {
940 t.piecePriorityChanged(piece)
943 func (t *Torrent) updateAllPiecePriorities() {
944 t.updatePiecePriorities(0, t.numPieces())
947 // Update all piece priorities in one hit. This function should have the same
948 // output as updatePiecePriority, but across all pieces.
949 func (t *Torrent) updatePiecePriorities(begin, end pieceIndex) {
950 for i := begin; i < end; i++ {
951 t.updatePiecePriority(i)
955 // Returns the range of pieces [begin, end) that contains the extent of bytes.
956 func (t *Torrent) byteRegionPieces(off, size int64) (begin, end pieceIndex) {
957 if off >= *t.length {
967 begin = pieceIndex(off / t.info.PieceLength)
968 end = pieceIndex((off + size + t.info.PieceLength - 1) / t.info.PieceLength)
969 if end > pieceIndex(t.info.NumPieces()) {
970 end = pieceIndex(t.info.NumPieces())
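// Worked example (illustrative): with a 262144-byte piece length, the extent
// off=100000, size=400000 covers bytes [100000, 500000), giving
// begin = 100000/262144 = 0 and end = (500000+262143)/262144 = 2,
// i.e. pieces [0, 2).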
975 // Returns true if all iterations complete without breaking. Returns the read
976 // regions for all readers. The reader regions should not be merged as some
977 // callers depend on this method to enumerate readers.
978 func (t *Torrent) forReaderOffsetPieces(f func(begin, end pieceIndex) (more bool)) (all bool) {
979 for r := range t.readers {
981 if p.begin >= p.end {
984 if !f(p.begin, p.end) {
991 func (t *Torrent) piecePriority(piece pieceIndex) piecePriority {
992 prio, ok := t.pendingPieces.GetPriority(bitmap.BitIndex(piece))
994 return PiecePriorityNone
999 ret := piecePriority(-prio)
1000 if ret == PiecePriorityNone {
1006 func (t *Torrent) pendRequest(req request) {
1007 ci := chunkIndex(req.chunkSpec, t.chunkSize)
1008 t.pieces[req.Index].pendChunkIndex(ci)
1011 func (t *Torrent) pieceCompletionChanged(piece pieceIndex) {
1013 t.cl.event.Broadcast()
1014 if t.pieceComplete(piece) {
1015 t.onPieceCompleted(piece)
1017 t.onIncompletePiece(piece)
1019 t.updatePiecePriority(piece)
1022 func (t *Torrent) numReceivedConns() (ret int) {
1023 for c := range t.conns {
1024 if c.Discovery == peerSourceIncoming {
1031 func (t *Torrent) maxHalfOpen() int {
1032 // Note that if we somehow exceed the maximum established conns, we want
1033 // the negative value to have an effect.
1034 establishedHeadroom := int64(t.maxEstablishedConns - len(t.conns))
1035 extraIncoming := int64(t.numReceivedConns() - t.maxEstablishedConns/2)
1036 // We want to allow some experimentation with new peers, and to try to
1037 // upset an oversupply of received connections.
1038 return int(min(max(5, extraIncoming)+establishedHeadroom, int64(t.cl.config.HalfOpenConnsPerTorrent)))
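// Worked example (illustrative): with maxEstablishedConns=50, 40 established
// conns of which 10 were received, establishedHeadroom = 50-40 = 10 and
// extraIncoming = 10-25 = -15, so the result is
// min(max(5, -15)+10, HalfOpenConnsPerTorrent) = min(15, HalfOpenConnsPerTorrent).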
1041 func (t *Torrent) openNewConns() {
1042 defer t.updateWantPeersEvent()
1043 for t.peers.Len() != 0 {
1047 if len(t.halfOpen) >= t.maxHalfOpen() {
1050 p := t.peers.PopMax()
1055 func (t *Torrent) getConnPieceInclination() []int {
1056 _ret := t.connPieceInclinationPool.Get()
1058 pieceInclinationsNew.Add(1)
1059 return rand.Perm(int(t.numPieces()))
1061 pieceInclinationsReused.Add(1)
1062 return *_ret.(*[]int)
1065 func (t *Torrent) putPieceInclination(pi []int) {
1066 t.connPieceInclinationPool.Put(&pi)
1067 pieceInclinationsPut.Add(1)
1070 func (t *Torrent) updatePieceCompletion(piece pieceIndex) bool {
1072 uncached := t.pieceCompleteUncached(piece)
1073 cached := p.completion()
1074 changed := cached != uncached
1075 p.storageCompletionOk = uncached.Ok
1076 t.completedPieces.Set(bitmap.BitIndex(piece), uncached.Complete)
1078 log.Fstr("piece %d completion changed: %+v -> %+v", piece, cached, uncached).WithValues(debugLogValue).Log(t.logger)
1079 t.pieceCompletionChanged(piece)
1084 // Non-blocking read. Client lock is not required.
1085 func (t *Torrent) readAt(b []byte, off int64) (n int, err error) {
1086 p := &t.pieces[off/t.info.PieceLength]
1087 p.waitNoPendingWrites()
1088 return p.Storage().ReadAt(b, off-p.Info().Offset())
1091 func (t *Torrent) updateAllPieceCompletions() {
1092 for i := pieceIndex(0); i < t.numPieces(); i++ {
1093 t.updatePieceCompletion(i)
1097 // Returns an error if the metadata was completed, but couldn't be set for
1098 // some reason. Blame it on the last peer to contribute.
1099 func (t *Torrent) maybeCompleteMetadata() error {
1104 if !t.haveAllMetadataPieces() {
1105 // Don't have enough metadata pieces.
1108 err := t.setInfoBytes(t.metadataBytes)
1110 t.invalidateMetadata()
1111 return fmt.Errorf("error setting info bytes: %s", err)
1113 if t.cl.config.Debug {
1114 t.logger.Printf("%s: got metadata from peers", t)
1119 func (t *Torrent) readerPiecePriorities() (now, readahead bitmap.Bitmap) {
1120 t.forReaderOffsetPieces(func(begin, end pieceIndex) bool {
1122 now.Add(bitmap.BitIndex(begin))
1123 readahead.AddRange(bitmap.BitIndex(begin)+1, bitmap.BitIndex(end))
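// Illustrative note: for a reader whose window currently covers pieces
// [begin, end), the piece at begin lands in the "now" bitmap and the rest of
// the window [begin+1, end) in "readahead"; these feed readerNowPieces and
// readerReadaheadPieces, which piece prioritisation consults elsewhere.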
1130 func (t *Torrent) needData() bool {
1131 if t.closed.IsSet() {
1137 return t.pendingPieces.Len() != 0
1140 func appendMissingStrings(old, new []string) (ret []string) {
1143 for _, n := range new {
1144 for _, o := range old {
1149 ret = append(ret, n)
1154 func appendMissingTrackerTiers(existing [][]string, minNumTiers int) (ret [][]string) {
1156 for minNumTiers > len(ret) {
1157 ret = append(ret, nil)
1162 func (t *Torrent) addTrackers(announceList [][]string) {
1163 fullAnnounceList := &t.metainfo.AnnounceList
1164 t.metainfo.AnnounceList = appendMissingTrackerTiers(*fullAnnounceList, len(announceList))
1165 for tierIndex, trackerURLs := range announceList {
1166 (*fullAnnounceList)[tierIndex] = appendMissingStrings((*fullAnnounceList)[tierIndex], trackerURLs)
1168 t.startMissingTrackerScrapers()
1169 t.updateWantPeersEvent()
1172 // Don't call this before the info is available.
1173 func (t *Torrent) bytesCompleted() int64 {
1177 return t.info.TotalLength() - t.bytesLeft()
1180 func (t *Torrent) SetInfoBytes(b []byte) (err error) {
1183 return t.setInfoBytes(b)
1186 // Returns true if connection is removed from torrent.Conns.
1187 func (t *Torrent) deleteConnection(c *connection) (ret bool) {
1188 if !c.closed.IsSet() {
1189 panic("connection is not closed")
1190 // There are behaviours prevented by the closed state that will fail
1191 // if the connection has been deleted.
1195 torrent.Add("deleted connections", 1)
1196 c.deleteAllRequests()
1197 if len(t.conns) == 0 {
1198 t.assertNoPendingRequests()
1203 func (t *Torrent) assertNoPendingRequests() {
1204 if len(t.pendingRequests) != 0 {
1205 panic(t.pendingRequests)
1207 if len(t.lastRequested) != 0 {
1208 panic(t.lastRequested)
1212 func (t *Torrent) dropConnection(c *connection) {
1213 t.cl.event.Broadcast()
1215 if t.deleteConnection(c) {
1220 func (t *Torrent) wantPeers() bool {
1221 if t.closed.IsSet() {
1224 if t.peers.Len() > t.cl.config.TorrentPeersLowWater {
1227 return t.needData() || t.seeding()
1230 func (t *Torrent) updateWantPeersEvent() {
1232 t.wantPeersEvent.Set()
1234 t.wantPeersEvent.Clear()
1238 // Returns whether the client should make effort to seed the torrent.
1239 func (t *Torrent) seeding() bool {
1241 if t.closed.IsSet() {
1244 if cl.config.NoUpload {
1247 if !cl.config.Seed {
1250 if cl.config.DisableAggressiveUpload && t.needData() {
1256 func (t *Torrent) startScrapingTracker(_url string) {
1260 u, err := url.Parse(_url)
1262 // URLs with a leading '*' appear to be a uTorrent convention to
1263 // disable trackers.
1265 log.Str("error parsing tracker url").AddValues("url", _url).Log(t.logger)
1269 if u.Scheme == "udp" {
1271 t.startScrapingTracker(u.String())
1273 t.startScrapingTracker(u.String())
1276 if u.Scheme == "udp4" && (t.cl.config.DisableIPv4Peers || t.cl.config.DisableIPv4) {
1279 if u.Scheme == "udp6" && t.cl.config.DisableIPv6 {
1282 if _, ok := t.trackerAnnouncers[_url]; ok {
1285 newAnnouncer := &trackerScraper{
1289 if t.trackerAnnouncers == nil {
1290 t.trackerAnnouncers = make(map[string]*trackerScraper)
1292 t.trackerAnnouncers[_url] = newAnnouncer
1293 go newAnnouncer.Run()
1296 // Adds and starts tracker scrapers for tracker URLs that aren't already
1298 func (t *Torrent) startMissingTrackerScrapers() {
1299 if t.cl.config.DisableTrackers {
1302 t.startScrapingTracker(t.metainfo.Announce)
1303 for _, tier := range t.metainfo.AnnounceList {
1304 for _, url := range tier {
1305 t.startScrapingTracker(url)
1310 // Returns an AnnounceRequest with fields filled out to defaults and current
1312 func (t *Torrent) announceRequest(event tracker.AnnounceEvent) tracker.AnnounceRequest {
1313 // Note that IPAddress is not set. It's set for UDP inside the tracker
1314 // code, since it's dependent on the network in use.
1315 return tracker.AnnounceRequest{
1318 Port: uint16(t.cl.incomingPeerPort()),
1319 PeerId: t.cl.peerID,
1320 InfoHash: t.infoHash,
1321 Key: t.cl.announceKey(),
1323 // The following are vaguely described in BEP 3.
1325 Left: t.bytesLeftAnnounce(),
1326 Uploaded: t.stats.BytesWrittenData.Int64(),
1327 // There's no mention of wasted or unwanted download in the BEP.
1328 Downloaded: t.stats.BytesReadUsefulData.Int64(),
1332 // Adds peers revealed in an announce until the announce ends, or we have
1334 func (t *Torrent) consumeDhtAnnouncePeers(pvs <-chan dht.PeersValues) {
1336 for v := range pvs {
1338 for _, cp := range v.Peers {
1340 // Can't do anything with this.
1346 Source: peerSourceDhtGetPeers,
1353 func (t *Torrent) announceToDht(impliedPort bool, s *dht.Server) error {
1354 ps, err := s.Announce(t.infoHash, t.cl.incomingPeerPort(), impliedPort)
1358 go t.consumeDhtAnnouncePeers(ps.Peers)
1360 case <-t.closed.LockedChan(t.cl.locker()):
1361 case <-time.After(5 * time.Minute):
1367 func (t *Torrent) dhtAnnouncer(s *dht.Server) {
1371 case <-t.closed.LockedChan(cl.locker()):
1373 case <-t.wantPeersEvent.LockedChan(cl.locker()):
1378 err := t.announceToDht(true, s)
1380 t.logger.Printf("error announcing %q to DHT: %s", t, err)
1385 func (t *Torrent) addPeers(peers []Peer) {
1386 for _, p := range peers {
1391 func (t *Torrent) Stats() TorrentStats {
1393 defer t.cl.rUnlock()
1394 return t.statsLocked()
1397 func (t *Torrent) statsLocked() (ret TorrentStats) {
1398 ret.ActivePeers = len(t.conns)
1399 ret.HalfOpenPeers = len(t.halfOpen)
1400 ret.PendingPeers = t.peers.Len()
1401 ret.TotalPeers = t.numTotalPeers()
1402 ret.ConnectedSeeders = 0
1403 for c := range t.conns {
1404 if all, ok := c.peerHasAllPieces(); all && ok {
1405 ret.ConnectedSeeders++
1408 ret.ConnStats = t.stats.Copy()
1412 // The total number of peers in the torrent.
1413 func (t *Torrent) numTotalPeers() int {
1414 peers := make(map[string]struct{})
1415 for conn := range t.conns {
1416 ra := conn.conn.RemoteAddr()
1418 // It's been closed and doesn't support RemoteAddr.
1421 peers[ra.String()] = struct{}{}
1423 for addr := range t.halfOpen {
1424 peers[addr] = struct{}{}
1426 t.peers.Each(func(peer Peer) {
1427 peers[fmt.Sprintf("%s:%d", peer.IP, peer.Port)] = struct{}{}
1432 // Reconcile bytes transferred before connection was associated with a
1434 func (t *Torrent) reconcileHandshakeStats(c *connection) {
1435 if c.stats != (ConnStats{
1436 // Handshakes should only increment these fields:
1437 BytesWritten: c.stats.BytesWritten,
1438 BytesRead: c.stats.BytesRead,
1442 c.postHandshakeStats(func(cs *ConnStats) {
1443 cs.BytesRead.Add(c.stats.BytesRead.Int64())
1444 cs.BytesWritten.Add(c.stats.BytesWritten.Int64())
1446 c.reconciledHandshakeStats = true
1449 // Returns an error if the connection is not added.
1450 func (t *Torrent) addConnection(c *connection) (err error) {
1453 torrent.Add("added connections", 1)
1456 if t.closed.IsSet() {
1457 return errors.New("torrent closed")
1459 for c0 := range t.conns {
1460 if c.PeerID != c0.PeerID {
1463 if !t.cl.config.dropDuplicatePeerIds {
1466 if left, ok := c.hasPreferredNetworkOver(c0); ok && left {
1468 t.deleteConnection(c0)
1470 return errors.New("existing connection preferred")
1473 if len(t.conns) >= t.maxEstablishedConns {
1474 c := t.worstBadConn()
1476 return errors.New("don't want conns")
1479 t.deleteConnection(c)
1481 if len(t.conns) >= t.maxEstablishedConns {
1484 t.conns[c] = struct{}{}
1488 func (t *Torrent) wantConns() bool {
1489 if !t.networkingEnabled {
1492 if t.closed.IsSet() {
1495 if !t.seeding() && !t.needData() {
1498 if len(t.conns) < t.maxEstablishedConns {
1501 return t.worstBadConn() != nil
1504 func (t *Torrent) SetMaxEstablishedConns(max int) (oldMax int) {
1507 oldMax = t.maxEstablishedConns
1508 t.maxEstablishedConns = max
1509 wcs := slices.HeapInterface(slices.FromMapKeys(t.conns), worseConn)
1510 for len(t.conns) > t.maxEstablishedConns && wcs.Len() > 0 {
1511 t.dropConnection(wcs.Pop().(*connection))
1517 func (t *Torrent) pieceHashed(piece pieceIndex, correct bool) {
1518 t.logger.Log(log.Fstr("hashed piece %d (passed=%t)", piece, correct).WithValues(debugLogValue))
1521 t.cl.event.Broadcast()
1522 if t.closed.IsSet() {
1525 touchers := t.reapPieceTouchers(piece)
1526 if p.storageCompletionOk {
1527 // Don't score the first time a piece is hashed, it could be an
1530 pieceHashedCorrect.Add(1)
1532 log.Fmsg("piece %d failed hash: %d connections contributed", piece, len(touchers)).AddValues(t, p).Log(t.logger)
1533 pieceHashedNotCorrect.Add(1)
1537 if len(touchers) != 0 {
1538 // Don't increment stats above connection-level for every involved
1540 t.allStats((*ConnStats).incrementPiecesDirtiedGood)
1542 for _, c := range touchers {
1543 c.stats.incrementPiecesDirtiedGood()
1545 err := p.Storage().MarkComplete()
1547 t.logger.Printf("%T: error marking piece complete %d: %s", t.storage, piece, err)
1550 if len(touchers) != 0 {
1551 // Don't increment stats above connection-level for every involved
1553 t.allStats((*ConnStats).incrementPiecesDirtiedBad)
1554 for _, c := range touchers {
1555 // Y u do dis peer?!
1556 c.stats.incrementPiecesDirtiedBad()
1558 slices.Sort(touchers, connLessTrusted)
1559 if t.cl.config.Debug {
1560 t.logger.Printf("dropping first corresponding conn from trust: %v", func() (ret []int64) {
1561 for _, c := range touchers {
1562 ret = append(ret, c.netGoodPiecesDirtied())
1568 t.cl.banPeerIP(c.remoteAddr.IP)
1571 t.onIncompletePiece(piece)
1572 p.Storage().MarkNotComplete()
1574 t.updatePieceCompletion(piece)
1577 func (t *Torrent) cancelRequestsForPiece(piece pieceIndex) {
1578 // TODO: Make faster
1579 for cn := range t.conns {
1584 func (t *Torrent) onPieceCompleted(piece pieceIndex) {
1585 t.pendAllChunkSpecs(piece)
1586 t.cancelRequestsForPiece(piece)
1587 for conn := range t.conns {
1592 // Called when a piece is found to be not complete.
1593 func (t *Torrent) onIncompletePiece(piece pieceIndex) {
1594 if t.pieceAllDirty(piece) {
1595 t.pendAllChunkSpecs(piece)
1597 if !t.wantPieceIndex(piece) {
1598 // t.logger.Printf("piece %d incomplete and unwanted", piece)
1601 // Here we could drop any connections that we've told we have a piece that
1602 // we don't actually have. But there's a test failure, and it seems clients don't care
1603 // if you request pieces that you already claim to have. Pruning bad
1604 // connections might just remove any connections that aren't treating us
1605 // favourably anyway.
1607 // for c := range t.conns {
1608 // if c.sentHave(piece) {
1612 for conn := range t.conns {
1613 if conn.PeerHasPiece(piece) {
1614 conn.updateRequests()
1619 func (t *Torrent) tryCreateMorePieceHashers() {
1620 for t.activePieceHashes < 2 && t.tryCreatePieceHasher() {
1624 func (t *Torrent) tryCreatePieceHasher() bool {
1625 if t.storage == nil {
1628 pi, ok := t.getPieceToHash()
1633 t.piecesQueuedForHash.Remove(pi)
1635 t.publishPieceChange(pi)
1636 t.updatePiecePriority(pi)
1637 t.storageLock.RLock()
1638 t.activePieceHashes++
1639 go t.pieceHasher(pi)
1643 func (t *Torrent) getPieceToHash() (ret pieceIndex, ok bool) {
1644 t.piecesQueuedForHash.IterTyped(func(i pieceIndex) bool {
1645 if t.piece(i).hashing {
1655 func (t *Torrent) pieceHasher(index pieceIndex) {
1657 sum := t.hashPiece(index)
1658 t.storageLock.RUnlock()
1662 t.updatePiecePriority(index)
1663 t.pieceHashed(index, sum == *p.hash)
1664 t.publishPieceChange(index)
1665 t.activePieceHashes--
1666 t.tryCreateMorePieceHashers()
1669 // Return the connections that touched a piece, and clear the entries while
1671 func (t *Torrent) reapPieceTouchers(piece pieceIndex) (ret []*connection) {
1672 for c := range t.pieces[piece].dirtiers {
1673 delete(c.peerTouchedPieces, piece)
1674 ret = append(ret, c)
1676 t.pieces[piece].dirtiers = nil
1680 func (t *Torrent) connsAsSlice() (ret []*connection) {
1681 for c := range t.conns {
1682 ret = append(ret, c)
1687 func (t *Torrent) queuePieceCheck(pieceIndex pieceIndex) {
1688 piece := t.piece(pieceIndex)
1689 if piece.queuedForHash() {
1692 t.piecesQueuedForHash.Add(bitmap.BitIndex(pieceIndex))
1693 t.publishPieceChange(pieceIndex)
1694 t.updatePiecePriority(pieceIndex)
1695 t.tryCreateMorePieceHashers()
1698 // Forces all the pieces to be re-hashed. See also Piece.VerifyData.
1699 func (t *Torrent) VerifyData() {
1700 for i := pieceIndex(0); i < t.NumPieces(); i++ {
1701 t.Piece(i).VerifyData()
1705 // Start the process of connecting to the given peer for the given torrent if
1707 func (t *Torrent) initiateConn(peer Peer) {
1708 if peer.Id == t.cl.peerID {
1711 if t.cl.badPeerIPPort(peer.IP, peer.Port) {
1714 addr := IpPort{peer.IP, uint16(peer.Port)}
1715 if t.addrActive(addr.String()) {
1718 t.halfOpen[addr.String()] = peer
1719 go t.cl.outgoingConnection(t, addr, peer.Source)
1722 func (t *Torrent) AddClientPeer(cl *Client) {
1723 t.AddPeers(func() (ps []Peer) {
1724 for _, la := range cl.ListenAddrs() {
1725 ps = append(ps, Peer{
1726 IP: missinggo.AddrIP(la),
1727 Port: missinggo.AddrPort(la),
1734 // All stats that include this Torrent. Useful when we want to increment
1735 // ConnStats but not for every connection.
1736 func (t *Torrent) allStats(f func(*ConnStats)) {
1741 func (t *Torrent) hashingPiece(i pieceIndex) bool {
1742 return t.pieces[i].hashing
1745 func (t *Torrent) pieceQueuedForHash(i pieceIndex) bool {
1746 return t.piecesQueuedForHash.Get(bitmap.BitIndex(i))
1749 func (t *Torrent) dialTimeout() time.Duration {
1750 return reducedDialTimeout(t.cl.config.MinDialTimeout, t.cl.config.NominalDialTimeout, t.cl.config.HalfOpenConnsPerTorrent, t.peers.Len())
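// Illustrative sketch, not the actual helper (reducedDialTimeout lives
// elsewhere in the package): the intent is to scale the nominal timeout down
// as the pending peer reserve grows relative to the per-torrent half-open
// limit, without going below the configured minimum. Roughly:
//
//	ret := nominal / time.Duration((pendingPeers+halfOpenLimit)/halfOpenLimit)
//	if ret < min {
//		ret = min
//	}
//	return ret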
1753 func (t *Torrent) piece(i int) *Piece {