19 "github.com/RoaringBitmap/roaring"
20 "github.com/anacrolix/chansync"
21 "github.com/anacrolix/chansync/events"
22 "github.com/anacrolix/dht/v2"
23 . "github.com/anacrolix/generics"
24 "github.com/anacrolix/log"
25 "github.com/anacrolix/missinggo/perf"
26 "github.com/anacrolix/missinggo/slices"
27 "github.com/anacrolix/missinggo/v2"
28 "github.com/anacrolix/missinggo/v2/bitmap"
29 "github.com/anacrolix/missinggo/v2/pubsub"
30 "github.com/anacrolix/multiless"
31 "github.com/anacrolix/sync"
32 request_strategy "github.com/anacrolix/torrent/request-strategy"
33 typedRoaring "github.com/anacrolix/torrent/typed-roaring"
34 "github.com/davecgh/go-spew/spew"
35 "github.com/pion/datachannel"
37 "github.com/anacrolix/torrent/bencode"
38 "github.com/anacrolix/torrent/common"
39 "github.com/anacrolix/torrent/metainfo"
40 pp "github.com/anacrolix/torrent/peer_protocol"
41 "github.com/anacrolix/torrent/segments"
42 "github.com/anacrolix/torrent/storage"
43 "github.com/anacrolix/torrent/tracker"
44 "github.com/anacrolix/torrent/webseed"
45 "github.com/anacrolix/torrent/webtorrent"
48 // Maintains state of torrent within a Client. Many methods should not be called before the info is
49 // available, see .Info and .GotInfo.
51 // Torrent-level aggregate statistics. First in struct to ensure 64-bit
52 // alignment. See #262.
57 networkingEnabled chansync.Flag
58 dataDownloadDisallowed chansync.Flag
59 dataUploadDisallowed bool
60 userOnWriteChunkErr func(error)
62 closed chansync.SetOnce
63 infoHash metainfo.Hash
65 // Values are the piece indices that changed.
66 pieceStateChanges pubsub.PubSub[PieceStateChange]
67 // The size of chunks to request from peers over the wire. This is
68 // normally 16KiB by convention these days.
71 // Total length of the torrent in bytes. Stored because it's not O(1) to
72 // get this from the info dict.
75 // The storage to open when the info dict becomes available.
76 storageOpener *storage.Client
77 // Storage for torrent data.
78 storage *storage.Torrent
79 // Read-locked for using storage, and write-locked for Closing.
80 storageLock sync.RWMutex
82 // TODO: Only announce stuff is used?
83 metainfo metainfo.MetaInfo
85 // The info dict. nil if we don't have it (yet).
87 fileIndex segments.Index
90 _chunksPerRegularPiece chunkIndexType
92 webSeeds map[string]*Peer
93 // Active peer connections, running message stream loops. TODO: Make this
94 // open (not-closed) connections only.
95 conns map[*PeerConn]struct{}
96 maxEstablishedConns int
97 // Set of addrs to which we're attempting to connect. Connections are
98 // half-open until all handshakes are completed.
99 halfOpen map[string]PeerInfo
101 // Reserve of peers to connect to. A peer can be both here and in the
102 // active connections if we're told about the peer after connecting with
103 // them. That encourages us to reconnect to peers that are well known in the swarm.
105 peers prioritizedPeers
106 // Whether we want to know more peers.
107 wantPeersEvent missinggo.Event
108 // An announcer for each tracker URL.
109 trackerAnnouncers map[string]torrentTrackerAnnouncer
110 // How many times we've initiated a DHT announce. TODO: Move into stats.
113 // Name used if the info name isn't available. Should be cleared when the
114 // Info does become available.
118 // The bencoded bytes of the info dict. This is actively manipulated if
119 // the info bytes aren't initially available, and we try to fetch them from peers.
122 // Each element corresponds to a 16KiB metadata piece. If true, we have
123 // received that piece.
124 metadataCompletedChunks []bool
125 metadataChanged sync.Cond
127 // Closed when .Info is obtained.
128 gotMetainfoC chan struct{}
130 readers map[*reader]struct{}
131 _readerNowPieces bitmap.Bitmap
132 _readerReadaheadPieces bitmap.Bitmap
134 // A cache of pieces we need to get. Calculated from various piece and
135 // file priorities and completion states elsewhere.
136 _pendingPieces roaring.Bitmap
137 // A cache of completed piece indices.
138 _completedPieces roaring.Bitmap
139 // Pieces that need to be hashed.
140 piecesQueuedForHash bitmap.Bitmap
141 activePieceHashes int
142 initialPieceCheckDisabled bool
144 connsWithAllPieces map[*Peer]struct{}
146 requestState []requestState
147 // Chunks we've written to since the corresponding piece was last checked.
148 dirtyChunks typedRoaring.Bitmap[RequestIndex]
152 // Is On when all pieces are complete.
153 Complete chansync.Flag
155 // Torrent sources in use keyed by the source string.
156 activeSources sync.Map
157 sourcesLogger log.Logger
159 smartBanCache smartBanCache
161 // Large allocations reused between request state updates.
162 requestPieceStates []request_strategy.PieceRequestOrderState
163 requestIndexes []RequestIndex
166 func (t *Torrent) selectivePieceAvailabilityFromPeers(i pieceIndex) (count int) {
167 // This could be done with roaring.BitSliceIndexing.
168 t.iterPeers(func(peer *Peer) {
169 if _, ok := t.connsWithAllPieces[peer]; ok {
172 if peer.peerHasPiece(i) {
179 func (t *Torrent) decPieceAvailability(i pieceIndex) {
184 if p.relativeAvailability <= 0 {
185 panic(p.relativeAvailability)
187 p.relativeAvailability--
188 t.updatePieceRequestOrder(i)
191 func (t *Torrent) incPieceAvailability(i pieceIndex) {
192 // If we don't have the info, this should be reconciled when we do.
195 p.relativeAvailability++
196 t.updatePieceRequestOrder(i)
200 func (t *Torrent) readerNowPieces() bitmap.Bitmap {
201 return t._readerNowPieces
204 func (t *Torrent) readerReadaheadPieces() bitmap.Bitmap {
205 return t._readerReadaheadPieces
208 func (t *Torrent) ignorePieceForRequests(i pieceIndex) bool {
209 return !t.wantPieceIndex(i)
212 // Returns a channel that is closed when the Torrent is closed.
213 func (t *Torrent) Closed() events.Done {
214 return t.closed.Done()
217 // KnownSwarm returns the known subset of the peers in the Torrent's swarm, including active,
218 // pending, and half-open peers.
219 func (t *Torrent) KnownSwarm() (ks []PeerInfo) {
220 // Add pending peers to the list
221 t.peers.Each(func(peer PeerInfo) {
222 ks = append(ks, peer)
225 // Add half-open peers to the list
226 for _, peer := range t.halfOpen {
227 ks = append(ks, peer)
230 // Add active peers to the list
231 for conn := range t.conns {
232 ks = append(ks, PeerInfo{
234 Addr: conn.RemoteAddr,
235 Source: conn.Discovery,
236 // > If the connection is encrypted, that's certainly enough to set SupportsEncryption.
237 // > But if we're not connected to them with an encrypted connection, I couldn't say
238 // > what's appropriate. We can carry forward the SupportsEncryption value as we
239 // > received it from trackers/DHT/PEX, or just use the encryption state for the
240 // > connection. It's probably easiest to do the latter for now.
241 // https://github.com/anacrolix/torrent/pull/188
242 SupportsEncryption: conn.headerEncrypted,
249 func (t *Torrent) setChunkSize(size pp.Integer) {
251 t.chunkPool = sync.Pool{
252 New: func() interface{} {
253 b := make([]byte, size)
259 func (t *Torrent) pieceComplete(piece pieceIndex) bool {
260 return t._completedPieces.Contains(bitmap.BitIndex(piece))
263 func (t *Torrent) pieceCompleteUncached(piece pieceIndex) storage.Completion {
264 if t.storage == nil {
265 return storage.Completion{Complete: false, Ok: true}
267 return t.pieces[piece].Storage().Completion()
270 // There's already a connection (established or half-open) to that address.
271 func (t *Torrent) addrActive(addr string) bool {
272 if _, ok := t.halfOpen[addr]; ok {
275 for c := range t.conns {
277 if ra.String() == addr {
284 func (t *Torrent) appendUnclosedConns(ret []*PeerConn) []*PeerConn {
285 return t.appendConns(ret, func(conn *PeerConn) bool {
286 return !conn.closed.IsSet()
290 func (t *Torrent) appendConns(ret []*PeerConn, f func(*PeerConn) bool) []*PeerConn {
291 for c := range t.conns {
299 func (t *Torrent) addPeer(p PeerInfo) (added bool) {
301 torrent.Add(fmt.Sprintf("peers added by source %q", p.Source), 1)
302 if t.closed.IsSet() {
305 if ipAddr, ok := tryIpPortFromNetAddr(p.Addr); ok {
306 if cl.badPeerIPPort(ipAddr.IP, ipAddr.Port) {
307 torrent.Add("peers not added because of bad addr", 1)
308 // cl.logger.Printf("peers not added because of bad addr: %v", p)
312 if replaced, ok := t.peers.AddReturningReplacedPeer(p); ok {
313 torrent.Add("peers replaced", 1)
314 if !replaced.equal(p) {
315 t.logger.WithDefaultLevel(log.Debug).Printf("added %v replacing %v", p, replaced)
322 for t.peers.Len() > cl.config.TorrentPeersHighWater {
323 _, ok := t.peers.DeleteMin()
325 torrent.Add("excess reserve peers discarded", 1)
331 func (t *Torrent) invalidateMetadata() {
332 for i := 0; i < len(t.metadataCompletedChunks); i++ {
333 t.metadataCompletedChunks[i] = false
336 t.gotMetainfoC = make(chan struct{})
341 func (t *Torrent) saveMetadataPiece(index int, data []byte) {
345 if index >= len(t.metadataCompletedChunks) {
346 t.logger.Printf("%s: ignoring metadata piece %d", t, index)
349 copy(t.metadataBytes[(1<<14)*index:], data)
350 t.metadataCompletedChunks[index] = true
353 func (t *Torrent) metadataPieceCount() int {
354 return (len(t.metadataBytes) + (1 << 14) - 1) / (1 << 14)
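// Worked example (hypothetical size): a 45,000-byte info dict gives
// (45000 + (1<<14) - 1) / (1<<14) == 3 metadata pieces, matching the
// 16KiB chunking used for the metadata extension.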
357 func (t *Torrent) haveMetadataPiece(piece int) bool {
359 return (1<<14)*piece < len(t.metadataBytes)
361 return piece < len(t.metadataCompletedChunks) && t.metadataCompletedChunks[piece]
365 func (t *Torrent) metadataSize() int {
366 return len(t.metadataBytes)
369 func infoPieceHashes(info *metainfo.Info) (ret [][]byte) {
370 for i := 0; i < len(info.Pieces); i += sha1.Size {
371 ret = append(ret, info.Pieces[i:i+sha1.Size])
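// For context: info.Pieces is the concatenation of 20-byte SHA-1 digests
// (sha1.Size == 20), so for example a 60-byte Pieces string yields 3 hashes here.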
376 func (t *Torrent) makePieces() {
377 hashes := infoPieceHashes(t.info)
378 t.pieces = make([]Piece, len(hashes))
379 for i, hash := range hashes {
380 piece := &t.pieces[i]
382 piece.index = pieceIndex(i)
383 piece.noPendingWrites.L = &piece.pendingWritesMutex
384 piece.hash = (*metainfo.Hash)(unsafe.Pointer(&hash[0]))
386 beginFile := pieceFirstFileIndex(piece.torrentBeginOffset(), files)
387 endFile := pieceEndFileIndex(piece.torrentEndOffset(), files)
388 piece.files = files[beginFile:endFile]
389 piece.undirtiedChunksIter = undirtiedChunksIter{
390 TorrentDirtyChunks: &t.dirtyChunks,
391 StartRequestIndex: piece.requestIndexOffset(),
392 EndRequestIndex: piece.requestIndexOffset() + piece.numChunks(),
397 // Returns the index of the first file containing the piece. files must be
398 // ordered by offset.
399 func pieceFirstFileIndex(pieceOffset int64, files []*File) int {
400 for i, f := range files {
401 if f.offset+f.length > pieceOffset {
408 // Returns the index after the last file containing the piece. files must be
409 // ordered by offset.
410 func pieceEndFileIndex(pieceEndOffset int64, files []*File) int {
411 for i, f := range files {
412 if f.offset+f.length >= pieceEndOffset {
419 func (t *Torrent) cacheLength() {
421 for _, f := range t.info.UpvertedFiles() {
427 // TODO: This shouldn't fail for storage reasons. Instead we should handle storage failure
429 func (t *Torrent) setInfo(info *metainfo.Info) error {
430 if err := validateInfo(info); err != nil {
431 return fmt.Errorf("bad info: %s", err)
433 if t.storageOpener != nil {
435 t.storage, err = t.storageOpener.OpenTorrent(info, t.infoHash)
437 return fmt.Errorf("error opening torrent storage: %s", err)
443 t._chunksPerRegularPiece = chunkIndexType((pp.Integer(t.usualPieceSize()) + t.chunkSize - 1) / t.chunkSize)
445 t.fileIndex = segments.NewIndex(common.LengthIterFromUpvertedFiles(info.UpvertedFiles()))
446 t.displayName = "" // Save a few bytes lol.
453 func (t *Torrent) pieceRequestOrderKey(i int) request_strategy.PieceRequestOrderKey {
454 return request_strategy.PieceRequestOrderKey{
455 InfoHash: t.infoHash,
460 // This seems to be all the follow-up tasks after the info is set that can't fail.
461 func (t *Torrent) onSetInfo() {
462 t.initPieceRequestOrder()
463 MakeSliceWithLength(&t.requestState, t.numChunks())
464 MakeSliceWithLength(&t.requestPieceStates, t.numPieces())
465 for i := range t.pieces {
467 // Need to add relativeAvailability before updating piece completion, as that may result in conns
469 if p.relativeAvailability != 0 {
470 panic(p.relativeAvailability)
472 p.relativeAvailability = t.selectivePieceAvailabilityFromPeers(i)
473 t.addRequestOrderPiece(i)
474 t.updatePieceCompletion(i)
475 if !t.initialPieceCheckDisabled && !p.storageCompletionOk {
476 // t.logger.Printf("piece %s completion unknown, queueing check", p)
480 t.cl.event.Broadcast()
481 close(t.gotMetainfoC)
482 t.updateWantPeersEvent()
483 t.tryCreateMorePieceHashers()
484 t.iterPeers(func(p *Peer) {
486 p.updateRequests("onSetInfo")
490 // Called when metadata for a torrent becomes available.
491 func (t *Torrent) setInfoBytesLocked(b []byte) error {
492 if metainfo.HashBytes(b) != t.infoHash {
493 return errors.New("info bytes have wrong hash")
495 var info metainfo.Info
496 if err := bencode.Unmarshal(b, &info); err != nil {
497 return fmt.Errorf("error unmarshalling info bytes: %s", err)
500 t.metadataCompletedChunks = nil
504 if err := t.setInfo(&info); err != nil {
511 func (t *Torrent) haveAllMetadataPieces() bool {
515 if t.metadataCompletedChunks == nil {
518 for _, have := range t.metadataCompletedChunks {
526 // TODO: Propagate errors to disconnect peer.
527 func (t *Torrent) setMetadataSize(size int) (err error) {
529 // We already know the correct metadata size.
532 if uint32(size) > maxMetadataSize {
533 return errors.New("bad size")
535 if len(t.metadataBytes) == size {
538 t.metadataBytes = make([]byte, size)
539 t.metadataCompletedChunks = make([]bool, (size+(1<<14)-1)/(1<<14))
540 t.metadataChanged.Broadcast()
541 for c := range t.conns {
542 c.requestPendingMetadata()
547 // The current working name for the torrent. Either the name in the info dict,
548 // a display name given some other way (such as by the dn value in a magnet link), or "".
549 func (t *Torrent) name() string {
551 defer t.nameMu.RUnlock()
553 return t.info.BestName()
555 if t.displayName != "" {
558 return "infohash:" + t.infoHash.HexString()
561 func (t *Torrent) pieceState(index pieceIndex) (ret PieceState) {
562 p := &t.pieces[index]
563 ret.Priority = t.piecePriority(index)
564 ret.Completion = p.completion()
565 ret.QueuedForHash = p.queuedForHash()
566 ret.Hashing = p.hashing
567 ret.Checking = ret.QueuedForHash || ret.Hashing
568 ret.Marking = p.marking
569 if !ret.Complete && t.piecePartiallyDownloaded(index) {
575 func (t *Torrent) metadataPieceSize(piece int) int {
576 return metadataPieceSize(len(t.metadataBytes), piece)
579 func (t *Torrent) newMetadataExtensionMessage(c *PeerConn, msgType pp.ExtendedMetadataRequestMsgType, piece int, data []byte) pp.Message {
582 ExtendedID: c.PeerExtensionIDs[pp.ExtensionNameMetadata],
583 ExtendedPayload: append(bencode.MustMarshal(pp.ExtendedMetadataRequestMsg{
585 TotalSize: len(t.metadataBytes),
591 type pieceAvailabilityRun struct {
596 func (me pieceAvailabilityRun) String() string {
597 return fmt.Sprintf("%v(%v)", me.Count, me.Availability)
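// Worked example (hypothetical availabilities): pieces with availabilities
// [2, 2, 2, 1, 1] produce the runs "3(2)" and "2(1)", i.e. count(availability).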
600 func (t *Torrent) pieceAvailabilityRuns() (ret []pieceAvailabilityRun) {
601 rle := missinggo.NewRunLengthEncoder(func(el interface{}, count uint64) {
602 ret = append(ret, pieceAvailabilityRun{Availability: el.(int), Count: int(count)})
604 for i := range t.pieces {
605 rle.Append(t.pieces[i].availability(), 1)
611 func (t *Torrent) pieceAvailabilityFrequencies() (freqs []int) {
612 freqs = make([]int, t.numActivePeers()+1)
613 for i := range t.pieces {
614 freqs[t.piece(i).availability()]++
619 func (t *Torrent) pieceStateRuns() (ret PieceStateRuns) {
620 rle := missinggo.NewRunLengthEncoder(func(el interface{}, count uint64) {
621 ret = append(ret, PieceStateRun{
622 PieceState: el.(PieceState),
626 for index := range t.pieces {
627 rle.Append(t.pieceState(pieceIndex(index)), 1)
633 // Produces a small string representing a PieceStateRun.
634 func (psr PieceStateRun) String() (ret string) {
635 ret = fmt.Sprintf("%d", psr.Length)
636 ret += func() string {
637 switch psr.Priority {
638 case PiecePriorityNext:
640 case PiecePriorityNormal:
642 case PiecePriorityReadahead:
644 case PiecePriorityNow:
646 case PiecePriorityHigh:
655 if psr.QueuedForHash {
673 func (t *Torrent) writeStatus(w io.Writer) {
674 fmt.Fprintf(w, "Infohash: %s\n", t.infoHash.HexString())
675 fmt.Fprintf(w, "Metadata length: %d\n", t.metadataSize())
677 fmt.Fprintf(w, "Metadata have: ")
678 for _, h := range t.metadataCompletedChunks {
679 fmt.Fprintf(w, "%c", func() rune {
689 fmt.Fprintf(w, "Piece length: %s\n",
692 return fmt.Sprintf("%v (%v chunks)",
694 float64(t.usualPieceSize())/float64(t.chunkSize))
701 fmt.Fprintf(w, "Num Pieces: %d (%d completed)\n", t.numPieces(), t.numPiecesCompleted())
702 fmt.Fprintf(w, "Piece States: %s\n", t.pieceStateRuns())
703 // Generates a huge, unhelpful listing when piece availability is very scattered. Prefer
704 // availability frequencies instead.
706 fmt.Fprintf(w, "Piece availability: %v\n", strings.Join(func() (ret []string) {
707 for _, run := range t.pieceAvailabilityRuns() {
708 ret = append(ret, run.String())
713 fmt.Fprintf(w, "Piece availability frequency: %v\n", strings.Join(
714 func() (ret []string) {
715 for avail, freq := range t.pieceAvailabilityFrequencies() {
719 ret = append(ret, fmt.Sprintf("%v: %v", avail, freq))
725 fmt.Fprintf(w, "Reader Pieces:")
726 t.forReaderOffsetPieces(func(begin, end pieceIndex) (again bool) {
727 fmt.Fprintf(w, " %d:%d", begin, end)
732 fmt.Fprintf(w, "Enabled trackers:\n")
734 tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
735 fmt.Fprintf(tw, " URL\tExtra\n")
736 for _, ta := range slices.Sort(slices.FromMapElems(t.trackerAnnouncers), func(l, r torrentTrackerAnnouncer) bool {
739 var luns, runs url.URL = *lu, *ru
742 var ml missinggo.MultiLess
743 ml.StrictNext(luns.String() == runs.String(), luns.String() < runs.String())
744 ml.StrictNext(lu.String() == ru.String(), lu.String() < ru.String())
746 }).([]torrentTrackerAnnouncer) {
747 fmt.Fprintf(tw, " %q\t%v\n", ta.URL(), ta.statusLine())
752 fmt.Fprintf(w, "DHT Announces: %d\n", t.numDHTAnnounces)
754 spew.NewDefaultConfig()
755 spew.Fdump(w, t.statsLocked())
757 peers := t.peersAsSlice()
758 sort.Slice(peers, func(_i, _j int) bool {
761 if less, ok := multiless.New().EagerSameLess(
762 i.downloadRate() == j.downloadRate(), i.downloadRate() < j.downloadRate(),
766 return worseConn(i, j)
768 for i, c := range peers {
769 fmt.Fprintf(w, "%2d. ", i+1)
774 func (t *Torrent) haveInfo() bool {
778 // Returns a run-time generated MetaInfo that includes the info bytes and
779 // announce-list as currently known to the client.
780 func (t *Torrent) newMetaInfo() metainfo.MetaInfo {
781 return metainfo.MetaInfo{
782 CreationDate: time.Now().Unix(),
783 Comment: "dynamic metainfo from client",
784 CreatedBy: "go.torrent",
785 AnnounceList: t.metainfo.UpvertedAnnounceList().Clone(),
786 InfoBytes: func() []byte {
788 return t.metadataBytes
793 UrlList: func() []string {
794 ret := make([]string, 0, len(t.webSeeds))
795 for url := range t.webSeeds {
796 ret = append(ret, url)
804 func (t *Torrent) BytesMissing() (n int64) {
806 n = t.bytesMissingLocked()
811 func (t *Torrent) bytesMissingLocked() int64 {
815 func iterFlipped(b *roaring.Bitmap, end uint64, cb func(uint32) bool) {
816 roaring.Flip(b, 0, end).Iterate(cb)
819 func (t *Torrent) bytesLeft() (left int64) {
820 iterFlipped(&t._completedPieces, uint64(t.numPieces()), func(x uint32) bool {
821 p := t.piece(pieceIndex(x))
822 left += int64(p.length() - p.numDirtyBytes())
828 // Bytes left to give in tracker announces.
829 func (t *Torrent) bytesLeftAnnounce() int64 {
837 func (t *Torrent) piecePartiallyDownloaded(piece pieceIndex) bool {
838 if t.pieceComplete(piece) {
841 if t.pieceAllDirty(piece) {
844 return t.pieces[piece].hasDirtyChunks()
847 func (t *Torrent) usualPieceSize() int {
848 return int(t.info.PieceLength)
851 func (t *Torrent) numPieces() pieceIndex {
852 return pieceIndex(t.info.NumPieces())
855 func (t *Torrent) numPiecesCompleted() (num pieceIndex) {
856 return pieceIndex(t._completedPieces.GetCardinality())
859 func (t *Torrent) close(wg *sync.WaitGroup) (err error) {
861 err = errors.New("already closed")
864 if t.storage != nil {
869 defer t.storageLock.Unlock()
870 if f := t.storage.Close; f != nil {
873 t.logger.WithDefaultLevel(log.Warning).Printf("error closing storage: %v", err1)
878 t.iterPeers(func(p *Peer) {
881 if t.storage != nil {
882 t.deletePieceRequestOrder()
884 for i := range t.pieces {
886 if p.relativeAvailability != 0 {
887 panic(fmt.Sprintf("piece %v has relative availability %v", i, p.relativeAvailability))
891 t.cl.event.Broadcast()
892 t.pieceStateChanges.Close()
893 t.updateWantPeersEvent()
897 func (t *Torrent) requestOffset(r Request) int64 {
898 return torrentRequestOffset(*t.length, int64(t.usualPieceSize()), r)
901 // Return the request that would include the given offset into the torrent data. Returns !ok if
902 // there is no such request.
903 func (t *Torrent) offsetRequest(off int64) (req Request, ok bool) {
904 return torrentOffsetRequest(*t.length, t.info.PieceLength, int64(t.chunkSize), off)
907 func (t *Torrent) writeChunk(piece int, begin int64, data []byte) (err error) {
908 defer perf.ScopeTimerErr(&err)()
909 n, err := t.pieces[piece].Storage().WriteAt(data, begin)
910 if err == nil && n != len(data) {
911 err = io.ErrShortWrite
916 func (t *Torrent) bitfield() (bf []bool) {
917 bf = make([]bool, t.numPieces())
918 t._completedPieces.Iterate(func(piece uint32) (again bool) {
925 func (t *Torrent) pieceNumChunks(piece pieceIndex) chunkIndexType {
926 return chunkIndexType((t.pieceLength(piece) + t.chunkSize - 1) / t.chunkSize)
929 func (t *Torrent) chunksPerRegularPiece() chunkIndexType {
930 return t._chunksPerRegularPiece
933 func (t *Torrent) numChunks() RequestIndex {
934 if t.numPieces() == 0 {
937 return RequestIndex(t.numPieces()-1)*t.chunksPerRegularPiece() + t.pieceNumChunks(t.numPieces()-1)
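// Worked example (hypothetical sizes): a 1,000,000-byte torrent with
// 262144-byte pieces and 16384-byte chunks has 4 pieces, the last one
// 213568 bytes, so numChunks is 3*16 + 14 == 62.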
940 func (t *Torrent) pendAllChunkSpecs(pieceIndex pieceIndex) {
941 t.dirtyChunks.RemoveRange(
942 uint64(t.pieceRequestIndexOffset(pieceIndex)),
943 uint64(t.pieceRequestIndexOffset(pieceIndex+1)))
946 func (t *Torrent) pieceLength(piece pieceIndex) pp.Integer {
947 if t.info.PieceLength == 0 {
948 // There will be no variance amongst pieces. Only pain.
951 if piece == t.numPieces()-1 {
952 ret := pp.Integer(*t.length % t.info.PieceLength)
957 return pp.Integer(t.info.PieceLength)
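// Worked example (same hypothetical sizes as above): 1000000 % 262144 == 213568,
// so the final piece is 213568 bytes while every other piece is the full 262144.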
960 func (t *Torrent) smartBanBlockCheckingWriter(piece pieceIndex) *blockCheckingWriter {
961 return &blockCheckingWriter{
962 cache: &t.smartBanCache,
963 requestIndex: t.pieceRequestIndexOffset(piece),
964 chunkSize: t.chunkSize.Int(),
968 func (t *Torrent) hashPiece(piece pieceIndex) (
970 // These are peers that sent us blocks that differ from what we hash here.
971 differingPeers map[bannableAddr]struct{},
975 p.waitNoPendingWrites()
976 storagePiece := t.pieces[piece].Storage()
978 // Does the backend want to do its own hashing?
979 if i, ok := storagePiece.PieceImpl.(storage.SelfHashing); ok {
980 var sum metainfo.Hash
981 // log.Printf("A piece decided to self-hash: %d", piece)
982 sum, err = i.SelfHash()
983 missinggo.CopyExact(&ret, sum)
987 hash := pieceHash.New()
988 const logPieceContents = false
989 smartBanWriter := t.smartBanBlockCheckingWriter(piece)
990 writers := []io.Writer{hash, smartBanWriter}
991 var examineBuf bytes.Buffer
992 if logPieceContents {
993 writers = append(writers, &examineBuf)
995 _, err = storagePiece.WriteTo(io.MultiWriter(writers...))
996 if logPieceContents {
997 t.logger.WithDefaultLevel(log.Debug).Printf("hashed %q with copy err %v", examineBuf.Bytes(), err)
999 smartBanWriter.Flush()
1000 differingPeers = smartBanWriter.badPeers
1001 missinggo.CopyExact(&ret, hash.Sum(nil))
1005 func (t *Torrent) haveAnyPieces() bool {
1006 return !t._completedPieces.IsEmpty()
1009 func (t *Torrent) haveAllPieces() bool {
1013 return t._completedPieces.GetCardinality() == bitmap.BitRange(t.numPieces())
1016 func (t *Torrent) havePiece(index pieceIndex) bool {
1017 return t.haveInfo() && t.pieceComplete(index)
1020 func (t *Torrent) maybeDropMutuallyCompletePeer(
1021 // I'm not sure about taking a peer here; not all peer implementations actually drop. Maybe that's
1025 if !t.cl.config.DropMutuallyCompletePeers {
1028 if !t.haveAllPieces() {
1031 if all, known := p.peerHasAllPieces(); !(known && all) {
1037 t.logger.WithDefaultLevel(log.Debug).Printf("dropping %v, which is mutually complete", p)
1041 func (t *Torrent) haveChunk(r Request) (ret bool) {
1043 // log.Println("have chunk", r, ret)
1048 if t.pieceComplete(pieceIndex(r.Index)) {
1051 p := &t.pieces[r.Index]
1052 return !p.pendingChunk(r.ChunkSpec, t.chunkSize)
1055 func chunkIndexFromChunkSpec(cs ChunkSpec, chunkSize pp.Integer) chunkIndexType {
1056 return chunkIndexType(cs.Begin / chunkSize)
1059 func (t *Torrent) wantPieceIndex(index pieceIndex) bool {
1060 return t._pendingPieces.Contains(uint32(index))
1063 // A pool of []*PeerConn, to reduce allocations in functions that need to index or sort Torrent
1064 // conns (which is a map).
1065 var peerConnSlices sync.Pool
1067 func getPeerConnSlice(cap int) []*PeerConn {
1068 getInterface := peerConnSlices.Get()
1069 if getInterface == nil {
1070 return make([]*PeerConn, 0, cap)
1072 return getInterface.([]*PeerConn)[:0]
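// Usage sketch (mirroring worstBadConn below): take a slice sized for the
// current conns and return it to the pool when done, so the backing array is
// reused across calls:
//
//	conns := getPeerConnSlice(len(t.conns))
//	defer peerConnSlices.Put(conns)
//	// ... append and sort *PeerConn values ...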
1076 // The worst connection is one that hasn't sent, or been sent, anything useful for the longest. A bad
1077 // connection is one that usually sends us unwanted pieces, or has been in the worse half of the
1078 // established connections for more than a minute. This is O(n log n). If there was a way to not
1079 // consider the position of a conn relative to the total number, it could be reduced to O(n).
1080 func (t *Torrent) worstBadConn() (ret *PeerConn) {
1081 wcs := worseConnSlice{conns: t.appendUnclosedConns(getPeerConnSlice(len(t.conns)))}
1082 defer peerConnSlices.Put(wcs.conns)
1085 for wcs.Len() != 0 {
1086 c := heap.Pop(&wcs).(*PeerConn)
1087 if c._stats.ChunksReadWasted.Int64() >= 6 && c._stats.ChunksReadWasted.Int64() > c._stats.ChunksReadUseful.Int64() {
1090 // If the connection is in the worst half of the established
1091 // connection quota and is older than a minute.
1092 if wcs.Len() >= (t.maxEstablishedConns+1)/2 {
1093 // Give connections 1 minute to prove themselves.
1094 if time.Since(c.completedHandshake) > time.Minute {
1102 type PieceStateChange struct {
1107 func (t *Torrent) publishPieceChange(piece pieceIndex) {
1108 t.cl._mu.Defer(func() {
1109 cur := t.pieceState(piece)
1110 p := &t.pieces[piece]
1111 if cur != p.publicPieceState {
1112 p.publicPieceState = cur
1113 t.pieceStateChanges.Publish(PieceStateChange{
1121 func (t *Torrent) pieceNumPendingChunks(piece pieceIndex) pp.Integer {
1122 if t.pieceComplete(piece) {
1125 return pp.Integer(t.pieceNumChunks(piece) - t.pieces[piece].numDirtyChunks())
1128 func (t *Torrent) pieceAllDirty(piece pieceIndex) bool {
1129 return t.pieces[piece].allChunksDirty()
1132 func (t *Torrent) readersChanged() {
1133 t.updateReaderPieces()
1134 t.updateAllPiecePriorities("Torrent.readersChanged")
1137 func (t *Torrent) updateReaderPieces() {
1138 t._readerNowPieces, t._readerReadaheadPieces = t.readerPiecePriorities()
1141 func (t *Torrent) readerPosChanged(from, to pieceRange) {
1145 t.updateReaderPieces()
1146 // Order the ranges, high and low.
1148 if l.begin > h.begin {
1151 if l.end < h.begin {
1152 // Two distinct ranges.
1153 t.updatePiecePriorities(l.begin, l.end, "Torrent.readerPosChanged")
1154 t.updatePiecePriorities(h.begin, h.end, "Torrent.readerPosChanged")
1161 t.updatePiecePriorities(l.begin, end, "Torrent.readerPosChanged")
1165 func (t *Torrent) maybeNewConns() {
1166 // Tickle the accept routine.
1167 t.cl.event.Broadcast()
1171 func (t *Torrent) piecePriorityChanged(piece pieceIndex, reason string) {
1172 if t._pendingPieces.Contains(uint32(piece)) {
1173 t.iterPeers(func(c *Peer) {
1174 // if c.requestState.Interested {
1177 if !c.isLowOnRequests() {
1180 if !c.peerHasPiece(piece) {
1183 if c.requestState.Interested && c.peerChoking && !c.peerAllowedFast.Contains(piece) {
1186 c.updateRequests(reason)
1190 t.publishPieceChange(piece)
1193 func (t *Torrent) updatePiecePriority(piece pieceIndex, reason string) {
1194 if !t.closed.IsSet() {
1195 // It would be possible to filter on pure-priority changes here to avoid churning the piece
1197 t.updatePieceRequestOrder(piece)
1199 p := &t.pieces[piece]
1200 newPrio := p.uncachedPriority()
1201 // t.logger.Printf("torrent %p: piece %d: uncached priority: %v", t, piece, newPrio)
1202 if newPrio == PiecePriorityNone {
1203 if !t._pendingPieces.CheckedRemove(uint32(piece)) {
1207 if !t._pendingPieces.CheckedAdd(uint32(piece)) {
1211 t.piecePriorityChanged(piece, reason)
1214 func (t *Torrent) updateAllPiecePriorities(reason string) {
1215 t.updatePiecePriorities(0, t.numPieces(), reason)
1218 // Update all piece priorities in one hit. This function should have the same
1219 // output as updatePiecePriority, but across all pieces.
1220 func (t *Torrent) updatePiecePriorities(begin, end pieceIndex, reason string) {
1221 for i := begin; i < end; i++ {
1222 t.updatePiecePriority(i, reason)
1226 // Returns the range of pieces [begin, end) that contains the extent of bytes.
1227 func (t *Torrent) byteRegionPieces(off, size int64) (begin, end pieceIndex) {
1228 if off >= *t.length {
1238 begin = pieceIndex(off / t.info.PieceLength)
1239 end = pieceIndex((off + size + t.info.PieceLength - 1) / t.info.PieceLength)
1240 if end > pieceIndex(t.info.NumPieces()) {
1241 end = pieceIndex(t.info.NumPieces())
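// Worked example (hypothetical sizes): with 262144-byte pieces, the extent
// off=300000, size=100000 covers bytes [300000, 400000), giving
// begin = 300000/262144 == 1 and end = (400000+262143)/262144 == 2,
// i.e. the half-open piece range [1, 2).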
1246 // Returns true if all iterations complete without breaking. Returns the read regions for all
1247 // readers. The reader regions should not be merged as some callers depend on this method to
1248 // enumerate readers.
1249 func (t *Torrent) forReaderOffsetPieces(f func(begin, end pieceIndex) (more bool)) (all bool) {
1250 for r := range t.readers {
1252 if p.begin >= p.end {
1255 if !f(p.begin, p.end) {
1262 func (t *Torrent) piecePriority(piece pieceIndex) piecePriority {
1263 return t.piece(piece).uncachedPriority()
1266 func (t *Torrent) pendRequest(req RequestIndex) {
1267 t.piece(t.pieceIndexOfRequestIndex(req)).pendChunkIndex(req % t.chunksPerRegularPiece())
1270 func (t *Torrent) pieceCompletionChanged(piece pieceIndex, reason string) {
1271 t.cl.event.Broadcast()
1272 if t.pieceComplete(piece) {
1273 t.onPieceCompleted(piece)
1275 t.onIncompletePiece(piece)
1277 t.updatePiecePriority(piece, reason)
1280 func (t *Torrent) numReceivedConns() (ret int) {
1281 for c := range t.conns {
1282 if c.Discovery == PeerSourceIncoming {
1289 func (t *Torrent) maxHalfOpen() int {
1290 // Note that if we somehow exceed the maximum established conns, we want
1291 // the negative value to have an effect.
1292 establishedHeadroom := int64(t.maxEstablishedConns - len(t.conns))
1293 extraIncoming := int64(t.numReceivedConns() - t.maxEstablishedConns/2)
1294 // We want to allow some experimentation with new peers, and to try to
1295 // upset an oversupply of received connections.
1296 return int(min(max(5, extraIncoming)+establishedHeadroom, int64(t.cl.config.HalfOpenConnsPerTorrent)))
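// Worked example (hypothetical counts): with maxEstablishedConns=50, 40
// established conns of which 30 were incoming, establishedHeadroom is
// 50-40 == 10 and extraIncoming is 30-25 == 5, so the result is
// min(max(5, 5)+10, HalfOpenConnsPerTorrent) == min(15, HalfOpenConnsPerTorrent).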
1299 func (t *Torrent) openNewConns() (initiated int) {
1300 defer t.updateWantPeersEvent()
1301 for t.peers.Len() != 0 {
1305 if len(t.halfOpen) >= t.maxHalfOpen() {
1308 if len(t.cl.dialers) == 0 {
1311 if t.cl.numHalfOpen >= t.cl.config.TotalHalfOpenConns {
1314 p := t.peers.PopMax()
1321 func (t *Torrent) updatePieceCompletion(piece pieceIndex) bool {
1323 uncached := t.pieceCompleteUncached(piece)
1324 cached := p.completion()
1325 changed := cached != uncached
1326 complete := uncached.Complete
1327 p.storageCompletionOk = uncached.Ok
1330 t._completedPieces.Add(x)
1333 t._completedPieces.Remove(x)
1335 p.t.updatePieceRequestOrder(piece)
1337 if complete && len(p.dirtiers) != 0 {
1338 t.logger.Printf("marked piece %v complete but still has dirtiers", piece)
1341 log.Fstr("piece %d completion changed: %+v -> %+v", piece, cached, uncached).LogLevel(log.Debug, t.logger)
1342 t.pieceCompletionChanged(piece, "Torrent.updatePieceCompletion")
1347 // Non-blocking read. Client lock is not required.
1348 func (t *Torrent) readAt(b []byte, off int64) (n int, err error) {
1350 p := &t.pieces[off/t.info.PieceLength]
1351 p.waitNoPendingWrites()
1353 n1, err = p.Storage().ReadAt(b, off-p.Info().Offset())
1364 // Returns an error if the metadata was completed, but couldn't be set for some reason. Blame it on
1365 // the last peer to contribute. TODO: Actually we shouldn't blame peers for failure to open storage
1366 // etc. Also we should probably cache metadata pieces per-Peer, to isolate failure appropriately.
1367 func (t *Torrent) maybeCompleteMetadata() error {
1372 if !t.haveAllMetadataPieces() {
1373 // Don't have enough metadata pieces.
1376 err := t.setInfoBytesLocked(t.metadataBytes)
1378 t.invalidateMetadata()
1379 return fmt.Errorf("error setting info bytes: %s", err)
1381 if t.cl.config.Debug {
1382 t.logger.Printf("%s: got metadata from peers", t)
1387 func (t *Torrent) readerPiecePriorities() (now, readahead bitmap.Bitmap) {
1388 t.forReaderOffsetPieces(func(begin, end pieceIndex) bool {
1390 now.Add(bitmap.BitIndex(begin))
1391 readahead.AddRange(bitmap.BitRange(begin)+1, bitmap.BitRange(end))
1398 func (t *Torrent) needData() bool {
1399 if t.closed.IsSet() {
1405 return !t._pendingPieces.IsEmpty()
1408 func appendMissingStrings(old, new []string) (ret []string) {
1411 for _, n := range new {
1412 for _, o := range old {
1417 ret = append(ret, n)
1422 func appendMissingTrackerTiers(existing [][]string, minNumTiers int) (ret [][]string) {
1424 for minNumTiers > len(ret) {
1425 ret = append(ret, nil)
1430 func (t *Torrent) addTrackers(announceList [][]string) {
1431 fullAnnounceList := &t.metainfo.AnnounceList
1432 t.metainfo.AnnounceList = appendMissingTrackerTiers(*fullAnnounceList, len(announceList))
1433 for tierIndex, trackerURLs := range announceList {
1434 (*fullAnnounceList)[tierIndex] = appendMissingStrings((*fullAnnounceList)[tierIndex], trackerURLs)
1436 t.startMissingTrackerScrapers()
1437 t.updateWantPeersEvent()
1440 // Don't call this before the info is available.
1441 func (t *Torrent) bytesCompleted() int64 {
1445 return *t.length - t.bytesLeft()
1448 func (t *Torrent) SetInfoBytes(b []byte) (err error) {
1451 return t.setInfoBytesLocked(b)
1454 // Returns true if the connection is removed from t.conns.
1455 func (t *Torrent) deletePeerConn(c *PeerConn) (ret bool) {
1456 if !c.closed.IsSet() {
1457 panic("connection is not closed")
1458 // There are behaviours prevented by the closed state that will fail
1459 // if the connection has been deleted.
1463 // Avoid adding a drop event more than once. Probably we should track whether we've generated
1464 // the drop event against the PexConnState instead.
1466 if !t.cl.config.DisablePEX {
1470 torrent.Add("deleted connections", 1)
1471 c.deleteAllRequests("Torrent.deletePeerConn")
1472 t.assertPendingRequests()
1473 if t.numActivePeers() == 0 && len(t.connsWithAllPieces) != 0 {
1474 panic(t.connsWithAllPieces)
1479 func (t *Torrent) decPeerPieceAvailability(p *Peer) {
1480 if t.deleteConnWithAllPieces(p) {
1486 p.peerPieces().Iterate(func(i uint32) bool {
1487 p.t.decPieceAvailability(pieceIndex(i))
1492 func (t *Torrent) assertPendingRequests() {
1496 // var actual pendingRequests
1497 // if t.haveInfo() {
1498 // actual.m = make([]int, t.numChunks())
1500 // t.iterPeers(func(p *Peer) {
1501 // p.requestState.Requests.Iterate(func(x uint32) bool {
1506 // diff := cmp.Diff(actual.m, t.pendingRequests.m)
1512 func (t *Torrent) dropConnection(c *PeerConn) {
1513 t.cl.event.Broadcast()
1515 if t.deletePeerConn(c) {
1520 // Peers as in contact information for dialing out.
1521 func (t *Torrent) wantPeers() bool {
1522 if t.closed.IsSet() {
1525 if t.peers.Len() > t.cl.config.TorrentPeersLowWater {
1528 return t.wantConns()
1531 func (t *Torrent) updateWantPeersEvent() {
1533 t.wantPeersEvent.Set()
1535 t.wantPeersEvent.Clear()
1539 // Returns whether the client should make an effort to seed the torrent.
1540 func (t *Torrent) seeding() bool {
1542 if t.closed.IsSet() {
1545 if t.dataUploadDisallowed {
1548 if cl.config.NoUpload {
1551 if !cl.config.Seed {
1554 if cl.config.DisableAggressiveUpload && t.needData() {
1560 func (t *Torrent) onWebRtcConn(
1561 c datachannel.ReadWriteCloser,
1562 dcc webtorrent.DataChannelContext,
1565 netConn := webrtcNetConn{
1567 DataChannelContext: dcc,
1569 peerRemoteAddr := netConn.RemoteAddr()
1570 if t.cl.badPeerAddr(peerRemoteAddr) {
1573 pc, err := t.cl.initiateProtocolHandshakes(
1574 context.Background(),
1579 netConn.RemoteAddr(),
1581 fmt.Sprintf("webrtc offer_id %x: %v", dcc.OfferId, regularNetConnPeerConnConnString(netConn)),
1584 t.logger.WithDefaultLevel(log.Error).Printf("error in handshaking webrtc connection: %v", err)
1587 if dcc.LocalOffered {
1588 pc.Discovery = PeerSourceTracker
1590 pc.Discovery = PeerSourceIncoming
1592 pc.conn.SetWriteDeadline(time.Time{})
1595 err = t.cl.runHandshookConn(pc, t)
1597 t.logger.WithDefaultLevel(log.Critical).Printf("error running handshook webrtc conn: %v", err)
1601 func (t *Torrent) logRunHandshookConn(pc *PeerConn, logAll bool, level log.Level) {
1602 err := t.cl.runHandshookConn(pc, t)
1603 if err != nil || logAll {
1604 t.logger.WithDefaultLevel(level).Printf("error running handshook conn: %v", err)
1608 func (t *Torrent) runHandshookConnLoggingErr(pc *PeerConn) {
1609 t.logRunHandshookConn(pc, false, log.Debug)
1612 func (t *Torrent) startWebsocketAnnouncer(u url.URL) torrentTrackerAnnouncer {
1613 wtc, release := t.cl.websocketTrackers.Get(u.String())
1618 wst := websocketTrackerStatus{u, wtc}
1620 err := wtc.Announce(tracker.Started, t.infoHash)
1622 t.logger.WithDefaultLevel(log.Warning).Printf(
1623 "error in initial announce to %q: %v",
1631 func (t *Torrent) startScrapingTracker(_url string) {
1635 u, err := url.Parse(_url)
1637 // URLs with a leading '*' appear to be a uTorrent convention to
1638 // disable trackers.
1640 log.Str("error parsing tracker url").AddValues("url", _url).Log(t.logger)
1644 if u.Scheme == "udp" {
1646 t.startScrapingTracker(u.String())
1648 t.startScrapingTracker(u.String())
1651 if _, ok := t.trackerAnnouncers[_url]; ok {
1654 sl := func() torrentTrackerAnnouncer {
1657 if t.cl.config.DisableWebtorrent {
1660 return t.startWebsocketAnnouncer(*u)
1662 if t.cl.config.DisableIPv4Peers || t.cl.config.DisableIPv4 {
1666 if t.cl.config.DisableIPv6 {
1670 newAnnouncer := &trackerScraper{
1673 lookupTrackerIp: t.cl.config.LookupTrackerIp,
1675 go newAnnouncer.Run()
1681 if t.trackerAnnouncers == nil {
1682 t.trackerAnnouncers = make(map[string]torrentTrackerAnnouncer)
1684 t.trackerAnnouncers[_url] = sl
1687 // Adds and starts tracker scrapers for tracker URLs that aren't already
1689 func (t *Torrent) startMissingTrackerScrapers() {
1690 if t.cl.config.DisableTrackers {
1693 t.startScrapingTracker(t.metainfo.Announce)
1694 for _, tier := range t.metainfo.AnnounceList {
1695 for _, url := range tier {
1696 t.startScrapingTracker(url)
1701 // Returns an AnnounceRequest with fields filled out to defaults and current
1703 func (t *Torrent) announceRequest(event tracker.AnnounceEvent) tracker.AnnounceRequest {
1704 // Note that IPAddress is not set. It's set for UDP inside the tracker code, since it's
1705 // dependent on the network in use.
1706 return tracker.AnnounceRequest{
1708 NumWant: func() int32 {
1709 if t.wantPeers() && len(t.cl.dialers) > 0 {
1715 Port: uint16(t.cl.incomingPeerPort()),
1716 PeerId: t.cl.peerID,
1717 InfoHash: t.infoHash,
1718 Key: t.cl.announceKey(),
1720 // The following are vaguely described in BEP 3.
1722 Left: t.bytesLeftAnnounce(),
1723 Uploaded: t.stats.BytesWrittenData.Int64(),
1724 // There's no mention of wasted or unwanted download in the BEP.
1725 Downloaded: t.stats.BytesReadUsefulData.Int64(),
1729 // Adds peers revealed in an announce until the announce ends, or we have
1731 func (t *Torrent) consumeDhtAnnouncePeers(pvs <-chan dht.PeersValues) {
1733 for v := range pvs {
1736 for _, cp := range v.Peers {
1738 // Can't do anything with this.
1741 if t.addPeer(PeerInfo{
1742 Addr: ipPortAddr{cp.IP, cp.Port},
1743 Source: PeerSourceDhtGetPeers,
1750 // log.Printf("added %v peers from dht for %v", added, t.InfoHash().HexString())
1755 // Announce using the provided DHT server. Peers are consumed automatically. done is closed when the
1756 // announce ends. stop will force the announce to end.
1757 func (t *Torrent) AnnounceToDht(s DhtServer) (done <-chan struct{}, stop func(), err error) {
1758 ps, err := s.Announce(t.infoHash, t.cl.incomingPeerPort(), true)
1762 _done := make(chan struct{})
1766 t.consumeDhtAnnouncePeers(ps.Peers())
1772 func (t *Torrent) timeboxedAnnounceToDht(s DhtServer) error {
1773 _, stop, err := t.AnnounceToDht(s)
1778 case <-t.closed.Done():
1779 case <-time.After(5 * time.Minute):
1785 func (t *Torrent) dhtAnnouncer(s DhtServer) {
1791 if t.closed.IsSet() {
1794 // We're also announcing ourselves as a listener, so we don't just want peer addresses.
1795 // TODO: We can include the announce_peer step depending on whether we can receive
1796 // inbound connections. We should probably only announce once every 15 mins too.
1800 // TODO: Determine if there's a listener on the port we're announcing.
1801 if len(cl.dialers) == 0 && len(cl.listeners) == 0 {
1812 err := t.timeboxedAnnounceToDht(s)
1814 t.logger.WithDefaultLevel(log.Warning).Printf("error announcing %q to DHT: %s", t, err)
1820 func (t *Torrent) addPeers(peers []PeerInfo) (added int) {
1821 for _, p := range peers {
1829 // The returned TorrentStats may require alignment in memory. See
1830 // https://github.com/anacrolix/torrent/issues/383.
1831 func (t *Torrent) Stats() TorrentStats {
1833 defer t.cl.rUnlock()
1834 return t.statsLocked()
1837 func (t *Torrent) statsLocked() (ret TorrentStats) {
1838 ret.ActivePeers = len(t.conns)
1839 ret.HalfOpenPeers = len(t.halfOpen)
1840 ret.PendingPeers = t.peers.Len()
1841 ret.TotalPeers = t.numTotalPeers()
1842 ret.ConnectedSeeders = 0
1843 for c := range t.conns {
1844 if all, ok := c.peerHasAllPieces(); all && ok {
1845 ret.ConnectedSeeders++
1848 ret.ConnStats = t.stats.Copy()
1849 ret.PiecesComplete = t.numPiecesCompleted()
1853 // The total number of peers in the torrent.
1854 func (t *Torrent) numTotalPeers() int {
1855 peers := make(map[string]struct{})
1856 for conn := range t.conns {
1857 ra := conn.conn.RemoteAddr()
1859 // It's been closed and doesn't support RemoteAddr.
1862 peers[ra.String()] = struct{}{}
1864 for addr := range t.halfOpen {
1865 peers[addr] = struct{}{}
1867 t.peers.Each(func(peer PeerInfo) {
1868 peers[peer.Addr.String()] = struct{}{}
1873 // Reconcile bytes transferred before connection was associated with a
1875 func (t *Torrent) reconcileHandshakeStats(c *PeerConn) {
1876 if c._stats != (ConnStats{
1877 // Handshakes should only increment these fields:
1878 BytesWritten: c._stats.BytesWritten,
1879 BytesRead: c._stats.BytesRead,
1883 c.postHandshakeStats(func(cs *ConnStats) {
1884 cs.BytesRead.Add(c._stats.BytesRead.Int64())
1885 cs.BytesWritten.Add(c._stats.BytesWritten.Int64())
1887 c.reconciledHandshakeStats = true
1890 // Returns an error if the connection was not added.
1891 func (t *Torrent) addPeerConn(c *PeerConn) (err error) {
1894 torrent.Add("added connections", 1)
1897 if t.closed.IsSet() {
1898 return errors.New("torrent closed")
1900 for c0 := range t.conns {
1901 if c.PeerID != c0.PeerID {
1904 if !t.cl.config.DropDuplicatePeerIds {
1907 if c.hasPreferredNetworkOver(c0) {
1909 t.deletePeerConn(c0)
1911 return errors.New("existing connection preferred")
1914 if len(t.conns) >= t.maxEstablishedConns {
1915 c := t.worstBadConn()
1917 return errors.New("don't want conns")
1922 if len(t.conns) >= t.maxEstablishedConns {
1925 t.conns[c] = struct{}{}
1926 if !t.cl.config.DisablePEX && !c.PeerExtensionBytes.SupportsExtended() {
1927 t.pex.Add(c) // as no further extended handshake expected
1932 func (t *Torrent) wantConns() bool {
1933 if !t.networkingEnabled.Bool() {
1936 if t.closed.IsSet() {
1939 if !t.needData() && (!t.seeding() || !t.haveAnyPieces()) {
1942 return len(t.conns) < t.maxEstablishedConns || t.worstBadConn() != nil
1945 func (t *Torrent) SetMaxEstablishedConns(max int) (oldMax int) {
1948 oldMax = t.maxEstablishedConns
1949 t.maxEstablishedConns = max
1950 wcs := worseConnSlice{
1951 conns: t.appendConns(nil, func(*PeerConn) bool {
1957 for len(t.conns) > t.maxEstablishedConns && wcs.Len() > 0 {
1958 t.dropConnection(heap.Pop(&wcs).(*PeerConn))
1964 func (t *Torrent) pieceHashed(piece pieceIndex, passed bool, hashIoErr error) {
1965 t.logger.LazyLog(log.Debug, func() log.Msg {
1966 return log.Fstr("hashed piece %d (passed=%t)", piece, passed)
1970 t.cl.event.Broadcast()
1971 if t.closed.IsSet() {
1975 // Don't score the first time a piece is hashed; it could be an initial check.
1976 if p.storageCompletionOk {
1978 pieceHashedCorrect.Add(1)
1981 "piece %d failed hash: %d connections contributed", piece, len(p.dirtiers),
1982 ).AddValues(t, p).LogLevel(
1984 log.Debug, t.logger)
1986 pieceHashedNotCorrect.Add(1)
1991 t.publishPieceChange(piece)
1994 t.publishPieceChange(piece)
1998 if len(p.dirtiers) != 0 {
1999 // Don't increment stats above connection-level for every involved connection.
2000 t.allStats((*ConnStats).incrementPiecesDirtiedGood)
2002 for c := range p.dirtiers {
2003 c._stats.incrementPiecesDirtiedGood()
2005 t.clearPieceTouchers(piece)
2007 err := p.Storage().MarkComplete()
2009 t.logger.Printf("%T: error marking piece complete %d: %s", t.storage, piece, err)
2013 if t.closed.IsSet() {
2016 t.pendAllChunkSpecs(piece)
2018 if len(p.dirtiers) != 0 && p.allChunksDirty() && hashIoErr == nil {
2019 // Peers contributed to all the data for this piece hash failure, and the failure was
2020 // not due to errors in the storage (such as data being dropped in a cache).
2022 // Increment Torrent and above stats, and then specific connections.
2023 t.allStats((*ConnStats).incrementPiecesDirtiedBad)
2024 for c := range p.dirtiers {
2025 // Y u do dis peer?!
2026 c.stats().incrementPiecesDirtiedBad()
2029 bannableTouchers := make([]*Peer, 0, len(p.dirtiers))
2030 for c := range p.dirtiers {
2032 bannableTouchers = append(bannableTouchers, c)
2035 t.clearPieceTouchers(piece)
2036 slices.Sort(bannableTouchers, connLessTrusted)
2038 if t.cl.config.Debug {
2040 "bannable conns by trust for piece %d: %v",
2042 func() (ret []connectionTrust) {
2043 for _, c := range bannableTouchers {
2044 ret = append(ret, c.trust())
2051 if len(bannableTouchers) >= 1 {
2052 c := bannableTouchers[0]
2053 if len(bannableTouchers) != 1 {
2054 t.logger.Levelf(log.Warning, "would have banned %v for touching piece %v after failed piece check", c.remoteIp(), piece)
2056 // Turns out it's still useful to ban peers like this because if there's only a
2057 // single peer for a piece, and we never progress that piece to completion, we
2058 // will never smart-ban them. Discovered in
2059 // https://github.com/anacrolix/torrent/issues/715.
2060 t.logger.Levelf(log.Warning, "banning %v for being sole dirtier of piece %v after failed piece check", c, piece)
2065 t.onIncompletePiece(piece)
2066 p.Storage().MarkNotComplete()
2068 t.updatePieceCompletion(piece)
2071 func (t *Torrent) cancelRequestsForPiece(piece pieceIndex) {
2072 start := t.pieceRequestIndexOffset(piece)
2073 end := start + t.pieceNumChunks(piece)
2074 for ri := start; ri < end; ri++ {
2079 func (t *Torrent) onPieceCompleted(piece pieceIndex) {
2080 t.pendAllChunkSpecs(piece)
2081 t.cancelRequestsForPiece(piece)
2082 t.piece(piece).readerCond.Broadcast()
2083 for conn := range t.conns {
2085 t.maybeDropMutuallyCompletePeer(&conn.Peer)
2089 // Called when a piece is found to be not complete.
2090 func (t *Torrent) onIncompletePiece(piece pieceIndex) {
2091 if t.pieceAllDirty(piece) {
2092 t.pendAllChunkSpecs(piece)
2094 if !t.wantPieceIndex(piece) {
2095 // t.logger.Printf("piece %d incomplete and unwanted", piece)
2098 // We could drop, here, any connections that we've told we have a piece that we
2099 // don't actually have. But there's a test failure, and it seems clients don't care
2100 // if you request pieces that you already claim to have. Pruning bad
2101 // connections might just remove any connections that aren't treating us
2102 // favourably anyway.
2104 // for c := range t.conns {
2105 // if c.sentHave(piece) {
2109 t.iterPeers(func(conn *Peer) {
2110 if conn.peerHasPiece(piece) {
2111 conn.updateRequests("piece incomplete")
2116 func (t *Torrent) tryCreateMorePieceHashers() {
2117 for !t.closed.IsSet() && t.activePieceHashes < 2 && t.tryCreatePieceHasher() {
2121 func (t *Torrent) tryCreatePieceHasher() bool {
2122 if t.storage == nil {
2125 pi, ok := t.getPieceToHash()
2130 t.piecesQueuedForHash.Remove(bitmap.BitIndex(pi))
2132 t.publishPieceChange(pi)
2133 t.updatePiecePriority(pi, "Torrent.tryCreatePieceHasher")
2134 t.storageLock.RLock()
2135 t.activePieceHashes++
2136 go t.pieceHasher(pi)
2140 func (t *Torrent) getPieceToHash() (ret pieceIndex, ok bool) {
2141 t.piecesQueuedForHash.IterTyped(func(i pieceIndex) bool {
2142 if t.piece(i).hashing {
2152 func (t *Torrent) dropBannedPeers() {
2153 t.iterPeers(func(p *Peer) {
2154 remoteIp := p.remoteIp()
2155 if remoteIp == nil {
2156 if p.bannableAddr.Ok() {
2157 t.logger.WithDefaultLevel(log.Debug).Printf("can't get remote ip for peer %v", p)
2161 netipAddr := netip.MustParseAddr(remoteIp.String())
2162 if Some(netipAddr) != p.bannableAddr {
2163 t.logger.WithDefaultLevel(log.Debug).Printf(
2164 "peer remote ip does not match its bannable addr [peer=%v, remote ip=%v, bannable addr=%v]",
2165 p, remoteIp, p.bannableAddr)
2167 if _, ok := t.cl.badPeerIPs[netipAddr]; ok {
2168 // Should this be a close?
2170 t.logger.WithDefaultLevel(log.Debug).Printf("dropped %v for banned remote IP %v", p, netipAddr)
2175 func (t *Torrent) pieceHasher(index pieceIndex) {
2177 sum, failedPeers, copyErr := t.hashPiece(index)
2178 correct := sum == *p.hash
2182 log.Fmsg("piece %v (%s) hash failure copy error: %v", p, p.hash.HexString(), copyErr).Log(t.logger)
2184 t.storageLock.RUnlock()
2188 for peer := range failedPeers {
2189 t.cl.banPeerIP(peer.AsSlice())
2190 t.logger.WithDefaultLevel(log.Debug).Printf("smart banned %v for piece %v", peer, index)
2193 for ri := t.pieceRequestIndexOffset(index); ri < t.pieceRequestIndexOffset(index+1); ri++ {
2194 t.smartBanCache.ForgetBlock(ri)
2198 t.pieceHashed(index, correct, copyErr)
2199 t.updatePiecePriority(index, "Torrent.pieceHasher")
2200 t.activePieceHashes--
2201 t.tryCreateMorePieceHashers()
2204 // Forget the connections that touched a piece, clearing the entries on both the peers and the piece.
2205 func (t *Torrent) clearPieceTouchers(pi pieceIndex) {
2207 for c := range p.dirtiers {
2208 delete(c.peerTouchedPieces, pi)
2209 delete(p.dirtiers, c)
2213 func (t *Torrent) peersAsSlice() (ret []*Peer) {
2214 t.iterPeers(func(p *Peer) {
2215 ret = append(ret, p)
2220 func (t *Torrent) queuePieceCheck(pieceIndex pieceIndex) {
2221 piece := t.piece(pieceIndex)
2222 if piece.queuedForHash() {
2225 t.piecesQueuedForHash.Add(bitmap.BitIndex(pieceIndex))
2226 t.publishPieceChange(pieceIndex)
2227 t.updatePiecePriority(pieceIndex, "Torrent.queuePieceCheck")
2228 t.tryCreateMorePieceHashers()
2231 // Forces all the pieces to be re-hashed. See also Piece.VerifyData. This should not be called
2232 // before the Info is available.
2233 func (t *Torrent) VerifyData() {
2234 for i := pieceIndex(0); i < t.NumPieces(); i++ {
2235 t.Piece(i).VerifyData()
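// Usage sketch: wait for the info first (see .GotInfo above), e.g.
//
//	<-t.GotInfo()
//	t.VerifyData()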
2239 // Start the process of connecting to the given peer for the given torrent if appropriate.
2240 func (t *Torrent) initiateConn(peer PeerInfo) {
2241 if peer.Id == t.cl.peerID {
2244 if t.cl.badPeerAddr(peer.Addr) && !peer.Trusted {
2248 if t.addrActive(addr.String()) {
2252 t.halfOpen[addr.String()] = peer
2253 go t.cl.outgoingConnection(t, addr, peer.Source, peer.Trusted)
2256 // Adds a trusted, pending peer for each of the given Client's addresses. Typically used in tests to
2257 // quickly make one Client visible to the Torrent of another Client.
2258 func (t *Torrent) AddClientPeer(cl *Client) int {
2259 return t.AddPeers(func() (ps []PeerInfo) {
2260 for _, la := range cl.ListenAddrs() {
2261 ps = append(ps, PeerInfo{
2270 // All stats that include this Torrent. Useful when we want to increment ConnStats but not for every
2272 func (t *Torrent) allStats(f func(*ConnStats)) {
2277 func (t *Torrent) hashingPiece(i pieceIndex) bool {
2278 return t.pieces[i].hashing
2281 func (t *Torrent) pieceQueuedForHash(i pieceIndex) bool {
2282 return t.piecesQueuedForHash.Get(bitmap.BitIndex(i))
2285 func (t *Torrent) dialTimeout() time.Duration {
2286 return reducedDialTimeout(t.cl.config.MinDialTimeout, t.cl.config.NominalDialTimeout, t.cl.config.HalfOpenConnsPerTorrent, t.peers.Len())
2289 func (t *Torrent) piece(i int) *Piece {
2293 func (t *Torrent) onWriteChunkErr(err error) {
2294 if t.userOnWriteChunkErr != nil {
2295 go t.userOnWriteChunkErr(err)
2298 t.logger.WithDefaultLevel(log.Critical).Printf("default chunk write error handler: disabling data download")
2299 t.disallowDataDownloadLocked()
2302 func (t *Torrent) DisallowDataDownload() {
2303 t.disallowDataDownloadLocked()
2306 func (t *Torrent) disallowDataDownloadLocked() {
2307 t.dataDownloadDisallowed.Set()
2310 func (t *Torrent) AllowDataDownload() {
2311 t.dataDownloadDisallowed.Clear()
2314 // Enables uploading data, if it was disabled.
2315 func (t *Torrent) AllowDataUpload() {
2318 t.dataUploadDisallowed = false
2319 for c := range t.conns {
2320 c.updateRequests("allow data upload")
2324 // Disables uploading data, if it was enabled.
2325 func (t *Torrent) DisallowDataUpload() {
2328 t.dataUploadDisallowed = true
2329 for c := range t.conns {
2330 // TODO: This doesn't look right. Shouldn't we tickle writers to choke peers or something instead?
2331 c.updateRequests("disallow data upload")
2335 // Sets a handler that is called if there's an error writing a chunk to local storage. By default,
2336 // or if nil, a critical message is logged, and data download is disabled.
2337 func (t *Torrent) SetOnWriteChunkError(f func(error)) {
2340 t.userOnWriteChunkErr = f
2343 func (t *Torrent) iterPeers(f func(p *Peer)) {
2344 for pc := range t.conns {
2347 for _, ws := range t.webSeeds {
2352 func (t *Torrent) callbacks() *Callbacks {
2353 return &t.cl.config.Callbacks
2356 type AddWebSeedsOpt func(*webseed.Client)
2358 // Sets the WebSeed trailing path escaper for a webseed.Client.
2359 func WebSeedPathEscaper(custom webseed.PathEscaper) AddWebSeedsOpt {
2360 return func(c *webseed.Client) {
2361 c.PathEscaper = custom
2365 func (t *Torrent) AddWebSeeds(urls []string, opts ...AddWebSeedsOpt) {
2368 for _, u := range urls {
2369 t.addWebSeed(u, opts...)
2373 func (t *Torrent) addWebSeed(url string, opts ...AddWebSeedsOpt) {
2374 if t.cl.config.DisableWebseeds {
2377 if _, ok := t.webSeeds[url]; ok {
2380 // I don't think Go http supports pipelining requests. However, we can have more ready to go
2381 // right away. This value should be some multiple of the number of connections to a host. I
2382 // would expect that double maxRequests plus a bit would be appropriate. This value is based on
2383 // downloading Sintel (08ada5a7a6183aae1e09d831df6748d566095a10) from
2384 // "https://webtorrent.io/torrents/".
2385 const maxRequests = 16
2391 reconciledHandshakeStats: true,
2392 // This should affect how often we have to recompute requests for this peer. Note that
2393 // because we can request more than 1 thing at a time over HTTP, we will hit the low
2394 // requests mark more often, so recomputation is probably sooner than with regular peer
2395 // conns. ~4x maxRequests would be about right.
2396 PeerMaxRequests: 128,
2397 // TODO: Set ban prefix?
2398 RemoteAddr: remoteAddrFromUrl(url),
2399 callbacks: t.callbacks(),
2401 client: webseed.Client{
2402 HttpClient: t.cl.httpClient,
2404 ResponseBodyWrapper: func(r io.Reader) io.Reader {
2405 return &rateLimitedReader{
2406 l: t.cl.config.DownloadRateLimiter,
2411 activeRequests: make(map[Request]webseed.Request, maxRequests),
2412 maxRequests: maxRequests,
2414 ws.peer.initRequestState()
2415 for _, opt := range opts {
2418 ws.peer.initUpdateRequestsTimer()
2419 ws.requesterCond.L = t.cl.locker()
2420 for i := 0; i < maxRequests; i += 1 {
2423 for _, f := range t.callbacks().NewPeer {
2426 ws.peer.logger = t.logger.WithContextValue(&ws)
2427 ws.peer.peerImpl = &ws
2429 ws.onGotInfo(t.info)
2431 t.webSeeds[url] = &ws.peer
2434 func (t *Torrent) peerIsActive(p *Peer) (active bool) {
2435 t.iterPeers(func(p1 *Peer) {
2443 func (t *Torrent) requestIndexToRequest(ri RequestIndex) Request {
2444 index := t.pieceIndexOfRequestIndex(ri)
2447 t.piece(index).chunkIndexSpec(ri % t.chunksPerRegularPiece()),
2451 func (t *Torrent) requestIndexFromRequest(r Request) RequestIndex {
2452 return t.pieceRequestIndexOffset(pieceIndex(r.Index)) + RequestIndex(r.Begin/t.chunkSize)
2455 func (t *Torrent) pieceRequestIndexOffset(piece pieceIndex) RequestIndex {
2456 return RequestIndex(piece) * t.chunksPerRegularPiece()
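// Worked example (hypothetical indices): with 16 chunks per regular piece,
// piece 3 chunk 5 maps to request index 3*16 + 5 == 53; the inverse mappings
// recover piece 53/16 == 3 (pieceIndexOfRequestIndex) and chunk 53%16 == 5.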
2459 func (t *Torrent) updateComplete() {
2460 t.Complete.SetBool(t.haveAllPieces())
2463 func (t *Torrent) cancelRequest(r RequestIndex) *Peer {
2464 p := t.requestingPeer(r)
2468 // TODO: This is a check that an old invariant holds. It can be removed after some testing.
2469 //delete(t.pendingRequests, r)
2470 var zeroRequestState requestState
2471 if t.requestState[r] != zeroRequestState {
2472 panic("expected request state to be gone")
2477 func (t *Torrent) requestingPeer(r RequestIndex) *Peer {
2478 return t.requestState[r].peer
2481 func (t *Torrent) addConnWithAllPieces(p *Peer) {
2482 if t.connsWithAllPieces == nil {
2483 t.connsWithAllPieces = make(map[*Peer]struct{}, t.maxEstablishedConns)
2485 t.connsWithAllPieces[p] = struct{}{}
2488 func (t *Torrent) deleteConnWithAllPieces(p *Peer) bool {
2489 _, ok := t.connsWithAllPieces[p]
2490 delete(t.connsWithAllPieces, p)
2494 func (t *Torrent) numActivePeers() int {
2495 return len(t.conns) + len(t.webSeeds)
2498 func (t *Torrent) hasStorageCap() bool {
2499 f := t.storage.Capacity
2507 func (t *Torrent) pieceIndexOfRequestIndex(ri RequestIndex) pieceIndex {
2508 return pieceIndex(ri / t.chunksPerRegularPiece())
2511 type requestState struct {