20 "github.com/RoaringBitmap/roaring"
21 "github.com/anacrolix/chansync"
22 "github.com/anacrolix/chansync/events"
23 "github.com/anacrolix/dht/v2"
24 . "github.com/anacrolix/generics"
25 g "github.com/anacrolix/generics"
26 "github.com/anacrolix/log"
27 "github.com/anacrolix/missinggo/slices"
28 "github.com/anacrolix/missinggo/v2"
29 "github.com/anacrolix/missinggo/v2/bitmap"
30 "github.com/anacrolix/missinggo/v2/pubsub"
31 "github.com/anacrolix/multiless"
32 "github.com/anacrolix/sync"
33 "github.com/pion/datachannel"
34 "golang.org/x/exp/maps"
36 "github.com/anacrolix/torrent/bencode"
37 "github.com/anacrolix/torrent/common"
38 "github.com/anacrolix/torrent/internal/check"
39 "github.com/anacrolix/torrent/internal/nestedmaps"
40 "github.com/anacrolix/torrent/metainfo"
41 pp "github.com/anacrolix/torrent/peer_protocol"
42 utHolepunch "github.com/anacrolix/torrent/peer_protocol/ut-holepunch"
43 request_strategy "github.com/anacrolix/torrent/request-strategy"
44 "github.com/anacrolix/torrent/segments"
45 "github.com/anacrolix/torrent/storage"
46 "github.com/anacrolix/torrent/tracker"
47 typedRoaring "github.com/anacrolix/torrent/typed-roaring"
48 "github.com/anacrolix/torrent/webseed"
49 "github.com/anacrolix/torrent/webtorrent"
// Maintains the state of a torrent within a Client. Many methods should not
// be called before the info is available; see .Info and .GotInfo.

	// Torrent-level aggregate statistics. First in struct to ensure 64-bit
	// alignment. See #262.

	networkingEnabled      chansync.Flag
	dataDownloadDisallowed chansync.Flag
	dataUploadDisallowed   bool
	userOnWriteChunkErr    func(error)

	closed   chansync.SetOnce
	infoHash metainfo.Hash

	// The order pieces are requested if there's no stronger reason like availability or priority.
	pieceRequestOrder []int
	// Values are the piece indices that changed.
	pieceStateChanges pubsub.PubSub[PieceStateChange]
	// The size of chunks to request from peers over the wire. This is
	// normally 16KiB by convention these days.

	// Total length of the torrent in bytes. Stored because it's not O(1) to
	// get this from the info dict.

	// The storage to open when the info dict becomes available.
	storageOpener *storage.Client
	// Storage for torrent data.
	storage *storage.Torrent
	// Read-locked for using storage, and write-locked for Closing.
	storageLock sync.RWMutex

	// TODO: Only announce stuff is used?
	metainfo metainfo.MetaInfo

	// The info dict. nil if we don't have it (yet).
	fileIndex segments.Index

	_chunksPerRegularPiece chunkIndexType

	webSeeds map[string]*Peer
	// Active peer connections, running message stream loops. TODO: Make this
	// open (not-closed) connections only.
	conns               map[*PeerConn]struct{}
	maxEstablishedConns int
	// Set of addrs to which we're attempting to connect. Connections are
	// half-open until all handshakes are completed.
	halfOpen map[string]map[outgoingConnAttemptKey]*PeerInfo

	// Reserve of peers to connect to. A peer can be both here and in the
	// active connections if we're told about the peer after connecting with
	// them. That encourages us to reconnect to peers that are well known in
	// the swarm.
	peers prioritizedPeers
	// Whether we want to know more peers.
	wantPeersEvent missinggo.Event
	// An announcer for each tracker URL.
	trackerAnnouncers map[string]torrentTrackerAnnouncer
	// How many times we've initiated a DHT announce. TODO: Move into stats.

	// Name used if the info name isn't available. Should be cleared when the
	// Info does become available.

	// The bencoded bytes of the info dict. This is actively manipulated if
	// the info bytes aren't initially available, and we try to fetch them
	// from peers.

	// Each element corresponds to the 16KiB metadata pieces. If true, we have
	// received that piece.
	metadataCompletedChunks []bool
	metadataChanged         sync.Cond

	// Closed when .Info is obtained.
	gotMetainfoC chan struct{}

	readers                map[*reader]struct{}
	_readerNowPieces       bitmap.Bitmap
	_readerReadaheadPieces bitmap.Bitmap

	// A cache of pieces we need to get. Calculated from various piece and
	// file priorities and completion states elsewhere.
	_pendingPieces roaring.Bitmap
	// A cache of completed piece indices.
	_completedPieces roaring.Bitmap
	// Pieces that need to be hashed.
	piecesQueuedForHash       bitmap.Bitmap
	activePieceHashes         int
	initialPieceCheckDisabled bool

	connsWithAllPieces map[*Peer]struct{}

	requestState map[RequestIndex]requestState
	// Chunks we've written to since the corresponding piece was last checked.
	dirtyChunks typedRoaring.Bitmap[RequestIndex]

	// Is On when all pieces are complete.
	Complete chansync.Flag

	// Torrent sources in use keyed by the source string.
	activeSources sync.Map
	sourcesLogger log.Logger

	smartBanCache smartBanCache

	// Large allocations reused between request state updates.
	requestPieceStates []request_strategy.PieceRequestOrderState
	requestIndexes     []RequestIndex

type outgoingConnAttemptKey = *PeerInfo
func (t *Torrent) length() int64 {
	return t._length.Value

func (t *Torrent) selectivePieceAvailabilityFromPeers(i pieceIndex) (count int) {
	// This could be done with roaring.BitSliceIndexing.
	t.iterPeers(func(peer *Peer) {
		if _, ok := t.connsWithAllPieces[peer]; ok {
		if peer.peerHasPiece(i) {

func (t *Torrent) decPieceAvailability(i pieceIndex) {
	if p.relativeAvailability <= 0 {
		panic(p.relativeAvailability)
	p.relativeAvailability--
	t.updatePieceRequestOrderPiece(i)

func (t *Torrent) incPieceAvailability(i pieceIndex) {
	// If we don't have the info, this should be reconciled when we do.
	p.relativeAvailability++
	t.updatePieceRequestOrderPiece(i)

func (t *Torrent) readerNowPieces() bitmap.Bitmap {
	return t._readerNowPieces

func (t *Torrent) readerReadaheadPieces() bitmap.Bitmap {
	return t._readerReadaheadPieces

func (t *Torrent) ignorePieceForRequests(i pieceIndex) bool {
	return !t.wantPieceIndex(i)

// Returns a channel that is closed when the Torrent is closed.
func (t *Torrent) Closed() events.Done {
	return t.closed.Done()
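
// Usage sketch (illustrative, assuming a *Torrent t): events.Done behaves
// like a receive-only channel, so callers can select on torrent shutdown.
//
//	go func() {
//		<-t.Closed()
//		log.Print("torrent closed; stop using it")
//	}()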
// KnownSwarm returns the known subset of the peers in the Torrent's swarm, including active,
// pending, and half-open peers.
func (t *Torrent) KnownSwarm() (ks []PeerInfo) {
	// Add pending peers to the list
	t.peers.Each(func(peer PeerInfo) {
		ks = append(ks, peer)

	// Add half-open peers to the list
	for _, attempts := range t.halfOpen {
		for _, peer := range attempts {
			ks = append(ks, *peer)

	// Add active peers to the list
	for conn := range t.conns {
		ks = append(ks, PeerInfo{
			Addr:   conn.RemoteAddr,
			Source: conn.Discovery,
			// > If the connection is encrypted, that's certainly enough to set SupportsEncryption.
			// > But if we're not connected to them with an encrypted connection, I couldn't say
			// > what's appropriate. We can carry forward the SupportsEncryption value as we
			// > received it from trackers/DHT/PEX, or just use the encryption state for the
			// > connection. It's probably easiest to do the latter for now.
			// https://github.com/anacrolix/torrent/pull/188
			SupportsEncryption: conn.headerEncrypted,
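
// Usage sketch (illustrative, assuming a *Torrent t):
//
//	for _, pi := range t.KnownSwarm() {
//		fmt.Printf("%v from %v\n", pi.Addr, pi.Source)
//	}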
func (t *Torrent) setChunkSize(size pp.Integer) {
	t.chunkPool = sync.Pool{
		New: func() interface{} {
			b := make([]byte, size)

func (t *Torrent) pieceComplete(piece pieceIndex) bool {
	return t._completedPieces.Contains(bitmap.BitIndex(piece))

func (t *Torrent) pieceCompleteUncached(piece pieceIndex) storage.Completion {
	if t.storage == nil {
		return storage.Completion{Complete: false, Ok: true}
	return t.pieces[piece].Storage().Completion()

// There's a connection to that address already.
func (t *Torrent) addrActive(addr string) bool {
	if _, ok := t.halfOpen[addr]; ok {
	for c := range t.conns {
		if ra.String() == addr {

func (t *Torrent) appendUnclosedConns(ret []*PeerConn) []*PeerConn {
	return t.appendConns(ret, func(conn *PeerConn) bool {
		return !conn.closed.IsSet()

func (t *Torrent) appendConns(ret []*PeerConn, f func(*PeerConn) bool) []*PeerConn {
	for c := range t.conns {

func (t *Torrent) addPeer(p PeerInfo) (added bool) {
	torrent.Add(fmt.Sprintf("peers added by source %q", p.Source), 1)
	if t.closed.IsSet() {
	if ipAddr, ok := tryIpPortFromNetAddr(p.Addr); ok {
		if cl.badPeerIPPort(ipAddr.IP, ipAddr.Port) {
			torrent.Add("peers not added because of bad addr", 1)
			// cl.logger.Printf("peers not added because of bad addr: %v", p)
	if replaced, ok := t.peers.AddReturningReplacedPeer(p); ok {
		torrent.Add("peers replaced", 1)
		if !replaced.equal(p) {
			t.logger.WithDefaultLevel(log.Debug).Printf("added %v replacing %v", p, replaced)
	for t.peers.Len() > cl.config.TorrentPeersHighWater {
		_, ok := t.peers.DeleteMin()
		torrent.Add("excess reserve peers discarded", 1)

func (t *Torrent) invalidateMetadata() {
	for i := 0; i < len(t.metadataCompletedChunks); i++ {
		t.metadataCompletedChunks[i] = false
	t.gotMetainfoC = make(chan struct{})

func (t *Torrent) saveMetadataPiece(index int, data []byte) {
	if index >= len(t.metadataCompletedChunks) {
		t.logger.Printf("%s: ignoring metadata piece %d", t, index)
	copy(t.metadataBytes[(1<<14)*index:], data)
	t.metadataCompletedChunks[index] = true

func (t *Torrent) metadataPieceCount() int {
	return (len(t.metadataBytes) + (1 << 14) - 1) / (1 << 14)

func (t *Torrent) haveMetadataPiece(piece int) bool {
	return (1<<14)*piece < len(t.metadataBytes)
	return piece < len(t.metadataCompletedChunks) && t.metadataCompletedChunks[piece]

func (t *Torrent) metadataSize() int {
	return len(t.metadataBytes)
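
// A worked example of the 16KiB (1<<14) metadata piece math above
// (illustrative, not from the original source): a 45000-byte info dict has
// (45000+16383)/16384 = 3 metadata pieces; pieces 0 and 1 are 16384 bytes
// each, and piece 2 is 45000-32768 = 12232 bytes.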
func infoPieceHashes(info *metainfo.Info) (ret [][]byte) {
	for i := 0; i < len(info.Pieces); i += sha1.Size {
		ret = append(ret, info.Pieces[i:i+sha1.Size])

func (t *Torrent) makePieces() {
	hashes := infoPieceHashes(t.info)
	t.pieces = make([]Piece, len(hashes))
	for i, hash := range hashes {
		piece := &t.pieces[i]
		piece.index = pieceIndex(i)
		piece.noPendingWrites.L = &piece.pendingWritesMutex
		piece.hash = (*metainfo.Hash)(unsafe.Pointer(&hash[0]))
		beginFile := pieceFirstFileIndex(piece.torrentBeginOffset(), files)
		endFile := pieceEndFileIndex(piece.torrentEndOffset(), files)
		piece.files = files[beginFile:endFile]

// Returns the index of the first file containing the piece. files must be
// ordered by offset.
func pieceFirstFileIndex(pieceOffset int64, files []*File) int {
	for i, f := range files {
		if f.offset+f.length > pieceOffset {

// Returns the index after the last file containing the piece. files must be
// ordered by offset.
func pieceEndFileIndex(pieceEndOffset int64, files []*File) int {
	for i, f := range files {
		if f.offset+f.length >= pieceEndOffset {
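
// A worked example (illustrative values): two files of lengths 100 and 300
// bytes, so offsets 0 and 100. For a piece spanning [150, 250):
//
//	pieceFirstFileIndex(150, files) // 1: file 0 ends at 100 <= 150
//	pieceEndFileIndex(250, files)   // 2: file 1 ends at 400 >= 250
//
// giving piece.files = files[1:2], just the second file.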
func (t *Torrent) cacheLength() {
	for _, f := range t.info.UpvertedFiles() {

// TODO: This shouldn't fail for storage reasons. Instead we should handle storage failure
func (t *Torrent) setInfo(info *metainfo.Info) error {
	if err := validateInfo(info); err != nil {
		return fmt.Errorf("bad info: %s", err)
	if t.storageOpener != nil {
		t.storage, err = t.storageOpener.OpenTorrent(info, t.infoHash)
			return fmt.Errorf("error opening torrent storage: %s", err)
	t._chunksPerRegularPiece = chunkIndexType((pp.Integer(t.usualPieceSize()) + t.chunkSize - 1) / t.chunkSize)
	t.fileIndex = segments.NewIndex(common.LengthIterFromUpvertedFiles(info.UpvertedFiles()))
	t.displayName = "" // Save a few bytes lol.

func (t *Torrent) pieceRequestOrderKey(i int) request_strategy.PieceRequestOrderKey {
	return request_strategy.PieceRequestOrderKey{
		InfoHash: t.infoHash,

// This seems to be all the follow-up tasks after the info is set that can't fail.
func (t *Torrent) onSetInfo() {
	t.pieceRequestOrder = rand.Perm(t.numPieces())
	t.initPieceRequestOrder()
	MakeSliceWithLength(&t.requestPieceStates, t.numPieces())
	for i := range t.pieces {
		// Need to add relativeAvailability before updating piece completion, as that may result in conns
		// being dropped.
		if p.relativeAvailability != 0 {
			panic(p.relativeAvailability)
		p.relativeAvailability = t.selectivePieceAvailabilityFromPeers(i)
		t.addRequestOrderPiece(i)
		t.updatePieceCompletion(i)
		if !t.initialPieceCheckDisabled && !p.storageCompletionOk {
			// t.logger.Printf("piece %s completion unknown, queueing check", p)
	t.cl.event.Broadcast()
	close(t.gotMetainfoC)
	t.updateWantPeersEvent()
	t.requestState = make(map[RequestIndex]requestState)
	t.tryCreateMorePieceHashers()
	t.iterPeers(func(p *Peer) {
		p.updateRequests("onSetInfo")

// Called when metadata for a torrent becomes available.
func (t *Torrent) setInfoBytesLocked(b []byte) error {
	if metainfo.HashBytes(b) != t.infoHash {
		return errors.New("info bytes have wrong hash")
	var info metainfo.Info
	if err := bencode.Unmarshal(b, &info); err != nil {
		return fmt.Errorf("error unmarshalling info bytes: %s", err)
	t.metadataCompletedChunks = nil
	if err := t.setInfo(&info); err != nil {

func (t *Torrent) haveAllMetadataPieces() bool {
	if t.metadataCompletedChunks == nil {
	for _, have := range t.metadataCompletedChunks {

// TODO: Propagate errors to disconnect peer.
func (t *Torrent) setMetadataSize(size int) (err error) {
	// We already know the correct metadata size.
	if uint32(size) > maxMetadataSize {
		return log.WithLevel(log.Warning, errors.New("bad size"))
	if len(t.metadataBytes) == size {
	t.metadataBytes = make([]byte, size)
	t.metadataCompletedChunks = make([]bool, (size+(1<<14)-1)/(1<<14))
	t.metadataChanged.Broadcast()
	for c := range t.conns {
		c.requestPendingMetadata()

// The current working name for the torrent. Either the name in the info dict,
// or a display name given, such as by the "dn" value in a magnet link, or "".
func (t *Torrent) name() string {
	defer t.nameMu.RUnlock()
	return t.info.BestName()
	if t.displayName != "" {
	return "infohash:" + t.infoHash.HexString()
func (t *Torrent) pieceState(index pieceIndex) (ret PieceState) {
	p := &t.pieces[index]
	ret.Priority = t.piecePriority(index)
	ret.Completion = p.completion()
	ret.QueuedForHash = p.queuedForHash()
	ret.Hashing = p.hashing
	ret.Checking = ret.QueuedForHash || ret.Hashing
	ret.Marking = p.marking
	if !ret.Complete && t.piecePartiallyDownloaded(index) {

func (t *Torrent) metadataPieceSize(piece int) int {
	return metadataPieceSize(len(t.metadataBytes), piece)

func (t *Torrent) newMetadataExtensionMessage(c *PeerConn, msgType pp.ExtendedMetadataRequestMsgType, piece int, data []byte) pp.Message {
		ExtendedID: c.PeerExtensionIDs[pp.ExtensionNameMetadata],
		ExtendedPayload: append(bencode.MustMarshal(pp.ExtendedMetadataRequestMsg{
			TotalSize: len(t.metadataBytes),

type pieceAvailabilityRun struct {

func (me pieceAvailabilityRun) String() string {
	return fmt.Sprintf("%v(%v)", me.Count, me.Availability)

func (t *Torrent) pieceAvailabilityRuns() (ret []pieceAvailabilityRun) {
	rle := missinggo.NewRunLengthEncoder(func(el interface{}, count uint64) {
		ret = append(ret, pieceAvailabilityRun{Availability: el.(int), Count: int(count)})
	for i := range t.pieces {
		rle.Append(t.pieces[i].availability(), 1)

func (t *Torrent) pieceAvailabilityFrequencies() (freqs []int) {
	freqs = make([]int, t.numActivePeers()+1)
	for i := range t.pieces {
		freqs[t.piece(i).availability()]++

func (t *Torrent) pieceStateRuns() (ret PieceStateRuns) {
	rle := missinggo.NewRunLengthEncoder(func(el interface{}, count uint64) {
		ret = append(ret, PieceStateRun{
			PieceState: el.(PieceState),
	for index := range t.pieces {
		rle.Append(t.pieceState(pieceIndex(index)), 1)

// Produces a small string representing a PieceStateRun.
func (psr PieceStateRun) String() (ret string) {
	ret = fmt.Sprintf("%d", psr.Length)
	ret += func() string {
		switch psr.Priority {
		case PiecePriorityNext:
		case PiecePriorityNormal:
		case PiecePriorityReadahead:
		case PiecePriorityNow:
		case PiecePriorityHigh:
	if psr.QueuedForHash {

func (t *Torrent) writeStatus(w io.Writer) {
	fmt.Fprintf(w, "Infohash: %s\n", t.infoHash.HexString())
	fmt.Fprintf(w, "Metadata length: %d\n", t.metadataSize())
	fmt.Fprintf(w, "Metadata have: ")
	for _, h := range t.metadataCompletedChunks {
		fmt.Fprintf(w, "%c", func() rune {
	fmt.Fprintf(w, "Piece length: %s\n",
			return fmt.Sprintf("%v (%v chunks)",
				float64(t.usualPieceSize())/float64(t.chunkSize))
	fmt.Fprintf(w, "Num Pieces: %d (%d completed)\n", t.numPieces(), t.numPiecesCompleted())
	fmt.Fprintf(w, "Piece States: %s\n", t.pieceStateRuns())
	// Generates a huge, unhelpful listing when piece availability is very scattered. Prefer
	// availability frequencies instead.
		fmt.Fprintf(w, "Piece availability: %v\n", strings.Join(func() (ret []string) {
			for _, run := range t.pieceAvailabilityRuns() {
				ret = append(ret, run.String())
	fmt.Fprintf(w, "Piece availability frequency: %v\n", strings.Join(
		func() (ret []string) {
			for avail, freq := range t.pieceAvailabilityFrequencies() {
				ret = append(ret, fmt.Sprintf("%v: %v", avail, freq))
	fmt.Fprintf(w, "Reader Pieces:")
	t.forReaderOffsetPieces(func(begin, end pieceIndex) (again bool) {
		fmt.Fprintf(w, " %d:%d", begin, end)
	fmt.Fprintf(w, "Enabled trackers:\n")
	tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
	fmt.Fprintf(tw, " URL\tExtra\n")
	for _, ta := range slices.Sort(slices.FromMapElems(t.trackerAnnouncers), func(l, r torrentTrackerAnnouncer) bool {
		var luns, runs url.URL = *lu, *ru
		var ml missinggo.MultiLess
		ml.StrictNext(luns.String() == runs.String(), luns.String() < runs.String())
		ml.StrictNext(lu.String() == ru.String(), lu.String() < ru.String())
	}).([]torrentTrackerAnnouncer) {
		fmt.Fprintf(tw, " %q\t%v\n", ta.URL(), ta.statusLine())
	fmt.Fprintf(w, "DHT Announces: %d\n", t.numDHTAnnounces)
	dumpStats(w, t.statsLocked())
	fmt.Fprintf(w, "webseeds:\n")
	t.writePeerStatuses(w, maps.Values(t.webSeeds))
	peerConns := maps.Keys(t.conns)
	// Peers without priorities first, then those with. I'm undecided about how to order peers
	// without priorities.
	sort.Slice(peerConns, func(li, ri int) bool {
		ml := multiless.New()
		lpp := g.ResultFromTuple(l.peerPriority()).ToOption()
		rpp := g.ResultFromTuple(r.peerPriority()).ToOption()
		ml = ml.Bool(lpp.Ok, rpp.Ok)
		ml = ml.Uint32(rpp.Value, lpp.Value)
	fmt.Fprintf(w, "%v peer conns:\n", len(peerConns))
	t.writePeerStatuses(w, g.SliceMap(peerConns, func(pc *PeerConn) *Peer {

func (t *Torrent) writePeerStatuses(w io.Writer, peers []*Peer) {
	for _, c := range peers {
		w.Write(bytes.TrimRight(
			bytes.ReplaceAll(buf.Bytes(), []byte("\n"), []byte("\n ")),

func (t *Torrent) haveInfo() bool {

// Returns a run-time generated MetaInfo that includes the info bytes and
// announce-list as currently known to the client.
func (t *Torrent) newMetaInfo() metainfo.MetaInfo {
	return metainfo.MetaInfo{
		CreationDate: time.Now().Unix(),
		Comment:      "dynamic metainfo from client",
		CreatedBy:    "go.torrent",
		AnnounceList: t.metainfo.UpvertedAnnounceList().Clone(),
		InfoBytes: func() []byte {
			return t.metadataBytes
		UrlList: func() []string {
			ret := make([]string, 0, len(t.webSeeds))
			for url := range t.webSeeds {
				ret = append(ret, url)
// Returns a count of bytes that are not complete in storage, and not pending being written to
// storage. This value is from the perspective of the download manager, and may not agree with the
// actual state in storage. If you want to read data synchronously you should use a Reader. See
// https://github.com/anacrolix/torrent/issues/828.
func (t *Torrent) BytesMissing() (n int64) {
	n = t.bytesMissingLocked()

func (t *Torrent) bytesMissingLocked() int64 {
func iterFlipped(b *roaring.Bitmap, end uint64, cb func(uint32) bool) {
	roaring.Flip(b, 0, end).Iterate(cb)

func (t *Torrent) bytesLeft() (left int64) {
	iterFlipped(&t._completedPieces, uint64(t.numPieces()), func(x uint32) bool {
		p := t.piece(pieceIndex(x))
		left += int64(p.length() - p.numDirtyBytes())

// Bytes left to give in tracker announces.
func (t *Torrent) bytesLeftAnnounce() int64 {

func (t *Torrent) piecePartiallyDownloaded(piece pieceIndex) bool {
	if t.pieceComplete(piece) {
	if t.pieceAllDirty(piece) {
	return t.pieces[piece].hasDirtyChunks()

func (t *Torrent) usualPieceSize() int {
	return int(t.info.PieceLength)

func (t *Torrent) numPieces() pieceIndex {
	return t.info.NumPieces()

func (t *Torrent) numPiecesCompleted() (num pieceIndex) {
	return pieceIndex(t._completedPieces.GetCardinality())

func (t *Torrent) close(wg *sync.WaitGroup) (err error) {
	err = errors.New("already closed")
	for _, f := range t.onClose {
	if t.storage != nil {
		defer t.storageLock.Unlock()
		if f := t.storage.Close; f != nil {
			t.logger.WithDefaultLevel(log.Warning).Printf("error closing storage: %v", err1)
	t.iterPeers(func(p *Peer) {
	if t.storage != nil {
		t.deletePieceRequestOrder()
	t.assertAllPiecesRelativeAvailabilityZero()
	t.cl.event.Broadcast()
	t.pieceStateChanges.Close()
	t.updateWantPeersEvent()

func (t *Torrent) assertAllPiecesRelativeAvailabilityZero() {
	for i := range t.pieces {
		if p.relativeAvailability != 0 {
			panic(fmt.Sprintf("piece %v has relative availability %v", i, p.relativeAvailability))

func (t *Torrent) requestOffset(r Request) int64 {
	return torrentRequestOffset(t.length(), int64(t.usualPieceSize()), r)

// Return the request that would include the given offset into the torrent data. Returns !ok if
// there is no such request.
func (t *Torrent) offsetRequest(off int64) (req Request, ok bool) {
	return torrentOffsetRequest(t.length(), t.info.PieceLength, int64(t.chunkSize), off)

func (t *Torrent) writeChunk(piece int, begin int64, data []byte) (err error) {
	// defer perf.ScopeTimerErr(&err)()
	n, err := t.pieces[piece].Storage().WriteAt(data, begin)
	if err == nil && n != len(data) {
		err = io.ErrShortWrite

func (t *Torrent) bitfield() (bf []bool) {
	bf = make([]bool, t.numPieces())
	t._completedPieces.Iterate(func(piece uint32) (again bool) {

func (t *Torrent) pieceNumChunks(piece pieceIndex) chunkIndexType {
	return chunkIndexType((t.pieceLength(piece) + t.chunkSize - 1) / t.chunkSize)

func (t *Torrent) chunksPerRegularPiece() chunkIndexType {
	return t._chunksPerRegularPiece

func (t *Torrent) numChunks() RequestIndex {
	if t.numPieces() == 0 {
	return RequestIndex(t.numPieces()-1)*t.chunksPerRegularPiece() + t.pieceNumChunks(t.numPieces()-1)

func (t *Torrent) pendAllChunkSpecs(pieceIndex pieceIndex) {
	t.dirtyChunks.RemoveRange(
		uint64(t.pieceRequestIndexOffset(pieceIndex)),
		uint64(t.pieceRequestIndexOffset(pieceIndex+1)))

func (t *Torrent) pieceLength(piece pieceIndex) pp.Integer {
	if t.info.PieceLength == 0 {
		// There will be no variance amongst pieces. Only pain.
	if piece == t.numPieces()-1 {
		ret := pp.Integer(t.length() % t.info.PieceLength)
	return pp.Integer(t.info.PieceLength)
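
// A worked example of the piece and chunk arithmetic above (illustrative
// values): a 1000000-byte torrent with 262144-byte pieces and 16384-byte
// chunks has 4 pieces and 16 chunks per regular piece. The final piece is
// 1000000 % 262144 = 213568 bytes, i.e. (213568+16383)/16384 = 14 chunks,
// so numChunks() = 3*16 + 14 = 62.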
func (t *Torrent) smartBanBlockCheckingWriter(piece pieceIndex) *blockCheckingWriter {
	return &blockCheckingWriter{
		cache:        &t.smartBanCache,
		requestIndex: t.pieceRequestIndexOffset(piece),
		chunkSize:    t.chunkSize.Int(),

func (t *Torrent) hashPiece(piece pieceIndex) (
	// These are peers that sent us blocks that differ from what we hash here.
	differingPeers map[bannableAddr]struct{},
	p.waitNoPendingWrites()
	storagePiece := t.pieces[piece].Storage()

	// Does the backend want to do its own hashing?
	if i, ok := storagePiece.PieceImpl.(storage.SelfHashing); ok {
		var sum metainfo.Hash
		// log.Printf("A piece decided to self-hash: %d", piece)
		sum, err = i.SelfHash()
		missinggo.CopyExact(&ret, sum)

	hash := pieceHash.New()
	const logPieceContents = false
	smartBanWriter := t.smartBanBlockCheckingWriter(piece)
	writers := []io.Writer{hash, smartBanWriter}
	var examineBuf bytes.Buffer
	if logPieceContents {
		writers = append(writers, &examineBuf)
	_, err = storagePiece.WriteTo(io.MultiWriter(writers...))
	if logPieceContents {
		t.logger.WithDefaultLevel(log.Debug).Printf("hashed %q with copy err %v", examineBuf.Bytes(), err)
	smartBanWriter.Flush()
	differingPeers = smartBanWriter.badPeers
	missinggo.CopyExact(&ret, hash.Sum(nil))

func (t *Torrent) haveAnyPieces() bool {
	return !t._completedPieces.IsEmpty()

func (t *Torrent) haveAllPieces() bool {
	return t._completedPieces.GetCardinality() == bitmap.BitRange(t.numPieces())

func (t *Torrent) havePiece(index pieceIndex) bool {
	return t.haveInfo() && t.pieceComplete(index)

func (t *Torrent) maybeDropMutuallyCompletePeer(
	// I'm not sure about taking peer here, not all peer implementations actually drop. Maybe that's
	// okay?
	if !t.cl.config.DropMutuallyCompletePeers {
	if !t.haveAllPieces() {
	if all, known := p.peerHasAllPieces(); !(known && all) {
	p.logger.Levelf(log.Debug, "is mutually complete; dropping")

func (t *Torrent) haveChunk(r Request) (ret bool) {
	// log.Println("have chunk", r, ret)
	if t.pieceComplete(pieceIndex(r.Index)) {
	p := &t.pieces[r.Index]
	return !p.pendingChunk(r.ChunkSpec, t.chunkSize)

func chunkIndexFromChunkSpec(cs ChunkSpec, chunkSize pp.Integer) chunkIndexType {
	return chunkIndexType(cs.Begin / chunkSize)

func (t *Torrent) wantPieceIndex(index pieceIndex) bool {
	return !t._pendingPieces.IsEmpty() && t._pendingPieces.Contains(uint32(index))

// A pool of []*PeerConn, to reduce allocations in functions that need to index or sort Torrent
// conns (which is a map).
var peerConnSlices sync.Pool

func getPeerConnSlice(cap int) []*PeerConn {
	getInterface := peerConnSlices.Get()
	if getInterface == nil {
		return make([]*PeerConn, 0, cap)
	return getInterface.([]*PeerConn)[:0]

// Calls the given function with a slice of unclosed conns. It uses a pool to reduce allocations as
// this is a frequent occurrence.
func (t *Torrent) withUnclosedConns(f func([]*PeerConn)) {
	sl := t.appendUnclosedConns(getPeerConnSlice(len(t.conns)))
	peerConnSlices.Put(sl)
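
// A minimal sketch of the pool pattern used above (illustrative): take a
// zero-length slice with spare capacity, fill it, then return it for reuse.
//
//	sl := getPeerConnSlice(len(t.conns)) // len 0, cap >= len(t.conns)
//	sl = t.appendUnclosedConns(sl)
//	// ... use sl ...
//	peerConnSlices.Put(sl)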
func (t *Torrent) worstBadConnFromSlice(opts worseConnLensOpts, sl []*PeerConn) *PeerConn {
	wcs := worseConnSlice{conns: sl}
	for wcs.Len() != 0 {
		c := heap.Pop(&wcs).(*PeerConn)
		if opts.incomingIsBad && !c.outgoing {
		if opts.outgoingIsBad && c.outgoing {
		if c._stats.ChunksReadWasted.Int64() >= 6 && c._stats.ChunksReadWasted.Int64() > c._stats.ChunksReadUseful.Int64() {
		// If the connection is in the worst half of the established
		// connection quota and is older than a minute.
		if wcs.Len() >= (t.maxEstablishedConns+1)/2 {
			// Give connections 1 minute to prove themselves.
			if time.Since(c.completedHandshake) > time.Minute {

// The worst connection is one that hasn't been sent, or hasn't sent, anything useful for the
// longest. A bad connection is one that usually sends us unwanted pieces, or has been in the worst
// half of the established connections for more than a minute. This is O(n log n). If there was a
// way to not consider the position of a conn relative to the total number, it could be reduced to
// O(n).
func (t *Torrent) worstBadConn(opts worseConnLensOpts) (ret *PeerConn) {
	t.withUnclosedConns(func(ucs []*PeerConn) {
		ret = t.worstBadConnFromSlice(opts, ucs)

type PieceStateChange struct {

func (t *Torrent) publishPieceStateChange(piece pieceIndex) {
	t.cl._mu.Defer(func() {
		cur := t.pieceState(piece)
		p := &t.pieces[piece]
		if cur != p.publicPieceState {
			p.publicPieceState = cur
			t.pieceStateChanges.Publish(PieceStateChange{

func (t *Torrent) pieceNumPendingChunks(piece pieceIndex) pp.Integer {
	if t.pieceComplete(piece) {
	return pp.Integer(t.pieceNumChunks(piece) - t.pieces[piece].numDirtyChunks())

func (t *Torrent) pieceAllDirty(piece pieceIndex) bool {
	return t.pieces[piece].allChunksDirty()

func (t *Torrent) readersChanged() {
	t.updateReaderPieces()
	t.updateAllPiecePriorities("Torrent.readersChanged")

func (t *Torrent) updateReaderPieces() {
	t._readerNowPieces, t._readerReadaheadPieces = t.readerPiecePriorities()

func (t *Torrent) readerPosChanged(from, to pieceRange) {
	t.updateReaderPieces()
	// Order the ranges, high and low.
	if l.begin > h.begin {
	if l.end < h.begin {
		// Two distinct ranges.
		t.updatePiecePriorities(l.begin, l.end, "Torrent.readerPosChanged")
		t.updatePiecePriorities(h.begin, h.end, "Torrent.readerPosChanged")
		t.updatePiecePriorities(l.begin, end, "Torrent.readerPosChanged")

func (t *Torrent) maybeNewConns() {
	// Tickle the accept routine.
	t.cl.event.Broadcast()

func (t *Torrent) onPiecePendingTriggers(piece pieceIndex, reason string) {
	if t._pendingPieces.Contains(uint32(piece)) {
		t.iterPeers(func(c *Peer) {
			// if c.requestState.Interested {
			if !c.isLowOnRequests() {
			if !c.peerHasPiece(piece) {
			if c.requestState.Interested && c.peerChoking && !c.peerAllowedFast.Contains(piece) {
			c.updateRequests(reason)
	t.publishPieceStateChange(piece)

func (t *Torrent) updatePiecePriorityNoTriggers(piece pieceIndex) (pendingChanged bool) {
	if !t.closed.IsSet() {
		// It would be possible to filter on pure-priority changes here to avoid churning the piece
		t.updatePieceRequestOrderPiece(piece)
	p := &t.pieces[piece]
	newPrio := p.uncachedPriority()
	// t.logger.Printf("torrent %p: piece %d: uncached priority: %v", t, piece, newPrio)
	if newPrio == PiecePriorityNone {
		return t._pendingPieces.CheckedRemove(uint32(piece))
	return t._pendingPieces.CheckedAdd(uint32(piece))

func (t *Torrent) updatePiecePriority(piece pieceIndex, reason string) {
	if t.updatePiecePriorityNoTriggers(piece) && !t.disableTriggers {
		t.onPiecePendingTriggers(piece, reason)
	t.updatePieceRequestOrderPiece(piece)

func (t *Torrent) updateAllPiecePriorities(reason string) {
	t.updatePiecePriorities(0, t.numPieces(), reason)

// Update all piece priorities in one hit. This function should have the same
// output as updatePiecePriority, but across all pieces.
func (t *Torrent) updatePiecePriorities(begin, end pieceIndex, reason string) {
	for i := begin; i < end; i++ {
		t.updatePiecePriority(i, reason)

// Returns the range of pieces [begin, end) that contains the extent of bytes.
func (t *Torrent) byteRegionPieces(off, size int64) (begin, end pieceIndex) {
	if off >= t.length() {
	begin = pieceIndex(off / t.info.PieceLength)
	end = pieceIndex((off + size + t.info.PieceLength - 1) / t.info.PieceLength)
	if end > pieceIndex(t.info.NumPieces()) {
		end = pieceIndex(t.info.NumPieces())
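
// A worked example of the range math above (illustrative values): with a
// piece length of 262144, the extent off=300000, size=100000 ends at 400000,
// inside piece 1:
//
//	begin = 300000 / 262144                       // 1
//	end = (300000 + 100000 + 262144 - 1) / 262144 // 2, so pieces [1, 2)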
// Returns true if all iterations complete without breaking. Returns the read regions for all
// readers. The reader regions should not be merged as some callers depend on this method to
// enumerate readers.
func (t *Torrent) forReaderOffsetPieces(f func(begin, end pieceIndex) (more bool)) (all bool) {
	for r := range t.readers {
		if p.begin >= p.end {
		if !f(p.begin, p.end) {

func (t *Torrent) piecePriority(piece pieceIndex) piecePriority {
	return t.piece(piece).uncachedPriority()

func (t *Torrent) pendRequest(req RequestIndex) {
	t.piece(t.pieceIndexOfRequestIndex(req)).pendChunkIndex(req % t.chunksPerRegularPiece())

func (t *Torrent) pieceCompletionChanged(piece pieceIndex, reason string) {
	t.cl.event.Broadcast()
	if t.pieceComplete(piece) {
		t.onPieceCompleted(piece)
		t.onIncompletePiece(piece)
	t.updatePiecePriority(piece, reason)

func (t *Torrent) numReceivedConns() (ret int) {
	for c := range t.conns {
		if c.Discovery == PeerSourceIncoming {

func (t *Torrent) numOutgoingConns() (ret int) {
	for c := range t.conns {

func (t *Torrent) maxHalfOpen() int {
	// Note that if we somehow exceed the maximum established conns, we want
	// the negative value to have an effect.
	establishedHeadroom := int64(t.maxEstablishedConns - len(t.conns))
	extraIncoming := int64(t.numReceivedConns() - t.maxEstablishedConns/2)
	// We want to allow some experimentation with new peers, and to try to
	// upset an oversupply of received connections.
		max(5, extraIncoming)+establishedHeadroom,
		int64(t.cl.config.HalfOpenConnsPerTorrent),

func (t *Torrent) openNewConns() (initiated int) {
	defer t.updateWantPeersEvent()
	for t.peers.Len() != 0 {
		if !t.wantOutgoingConns() {
		if len(t.halfOpen) >= t.maxHalfOpen() {
		if len(t.cl.dialers) == 0 {
		if t.cl.numHalfOpen >= t.cl.config.TotalHalfOpenConns {
		p := t.peers.PopMax()
		opts := outgoingConnOpts{
			requireRendezvous:        false,
			skipHolepunchRendezvous:  false,
			receivedHolepunchConnect: false,
			HeaderObfuscationPolicy:  t.cl.config.HeaderObfuscationPolicy,
		initiateConn(opts, false)

func (t *Torrent) updatePieceCompletion(piece pieceIndex) bool {
	uncached := t.pieceCompleteUncached(piece)
	cached := p.completion()
	changed := cached != uncached
	complete := uncached.Complete
	p.storageCompletionOk = uncached.Ok
		t._completedPieces.Add(x)
		t._completedPieces.Remove(x)
	p.t.updatePieceRequestOrderPiece(piece)
	if complete && len(p.dirtiers) != 0 {
		t.logger.Printf("marked piece %v complete but still has dirtiers", piece)
		// "piece completion changed",
		// slog.Int("piece", piece),
		// slog.Any("from", cached),
		// slog.Any("to", uncached))
		t.pieceCompletionChanged(piece, "Torrent.updatePieceCompletion")

// Non-blocking read. Client lock is not required.
func (t *Torrent) readAt(b []byte, off int64) (n int, err error) {
	p := &t.pieces[off/t.info.PieceLength]
	p.waitNoPendingWrites()
	n1, err = p.Storage().ReadAt(b, off-p.Info().Offset())

// Returns an error if the metadata was completed, but couldn't be set for some reason. Blame it on
// the last peer to contribute. TODO: Actually we shouldn't blame peers for failure to open storage
// etc. Also we should probably cache metadata pieces per-Peer, to isolate failure appropriately.
func (t *Torrent) maybeCompleteMetadata() error {
	if !t.haveAllMetadataPieces() {
		// Don't have enough metadata pieces.
	err := t.setInfoBytesLocked(t.metadataBytes)
		t.invalidateMetadata()
		return fmt.Errorf("error setting info bytes: %s", err)
	if t.cl.config.Debug {
		t.logger.Printf("%s: got metadata from peers", t)

func (t *Torrent) readerPiecePriorities() (now, readahead bitmap.Bitmap) {
	t.forReaderOffsetPieces(func(begin, end pieceIndex) bool {
		now.Add(bitmap.BitIndex(begin))
		readahead.AddRange(bitmap.BitRange(begin)+1, bitmap.BitRange(end))

func (t *Torrent) needData() bool {
	if t.closed.IsSet() {
	return !t._pendingPieces.IsEmpty()

func appendMissingStrings(old, new []string) (ret []string) {
	for _, n := range new {
		for _, o := range old {
		ret = append(ret, n)

func appendMissingTrackerTiers(existing [][]string, minNumTiers int) (ret [][]string) {
	for minNumTiers > len(ret) {
		ret = append(ret, nil)

func (t *Torrent) addTrackers(announceList [][]string) {
	fullAnnounceList := &t.metainfo.AnnounceList
	t.metainfo.AnnounceList = appendMissingTrackerTiers(*fullAnnounceList, len(announceList))
	for tierIndex, trackerURLs := range announceList {
		(*fullAnnounceList)[tierIndex] = appendMissingStrings((*fullAnnounceList)[tierIndex], trackerURLs)
	t.startMissingTrackerScrapers()
	t.updateWantPeersEvent()

// Don't call this before the info is available.
func (t *Torrent) bytesCompleted() int64 {
	return t.length() - t.bytesLeft()

func (t *Torrent) SetInfoBytes(b []byte) (err error) {
	return t.setInfoBytesLocked(b)

// Returns true if connection is removed from torrent.Conns.
func (t *Torrent) deletePeerConn(c *PeerConn) (ret bool) {
	if !c.closed.IsSet() {
		panic("connection is not closed")
		// There are behaviours prevented by the closed state that will fail
		// if the connection has been deleted.
	// Avoid adding a drop event more than once. Probably we should track whether we've generated
	// the drop event against the PexConnState instead.
	if !t.cl.config.DisablePEX {
	torrent.Add("deleted connections", 1)
	c.deleteAllRequests("Torrent.deletePeerConn")
	t.assertPendingRequests()
	if t.numActivePeers() == 0 && len(t.connsWithAllPieces) != 0 {
		panic(t.connsWithAllPieces)

func (t *Torrent) decPeerPieceAvailability(p *Peer) {
	if t.deleteConnWithAllPieces(p) {
	p.peerPieces().Iterate(func(i uint32) bool {
		p.t.decPieceAvailability(pieceIndex(i))

func (t *Torrent) assertPendingRequests() {
	// var actual pendingRequests
	// if t.haveInfo() {
	//	actual.m = make([]int, t.numChunks())
	// t.iterPeers(func(p *Peer) {
	//	p.requestState.Requests.Iterate(func(x uint32) bool {
	// diff := cmp.Diff(actual.m, t.pendingRequests.m)

func (t *Torrent) dropConnection(c *PeerConn) {
	t.cl.event.Broadcast()
	if t.deletePeerConn(c) {

// Peers as in contact information for dialing out.
func (t *Torrent) wantPeers() bool {
	if t.closed.IsSet() {
	if t.peers.Len() > t.cl.config.TorrentPeersLowWater {
	return t.wantOutgoingConns()

func (t *Torrent) updateWantPeersEvent() {
	t.wantPeersEvent.Set()
	t.wantPeersEvent.Clear()

// Returns whether the client should make effort to seed the torrent.
func (t *Torrent) seeding() bool {
	if t.closed.IsSet() {
	if t.dataUploadDisallowed {
	if cl.config.NoUpload {
	if !cl.config.Seed {
	if cl.config.DisableAggressiveUpload && t.needData() {

func (t *Torrent) onWebRtcConn(
	c datachannel.ReadWriteCloser,
	dcc webtorrent.DataChannelContext,
	netConn := webrtcNetConn{
		DataChannelContext: dcc,
	peerRemoteAddr := netConn.RemoteAddr()
	// t.logger.Levelf(log.Critical, "onWebRtcConn remote addr: %v", peerRemoteAddr)
	if t.cl.badPeerAddr(peerRemoteAddr) {
	localAddrIpPort := missinggo.IpPortFromNetAddr(netConn.LocalAddr())
	pc, err := t.cl.initiateProtocolHandshakes(
		context.Background(),
		outgoing:        dcc.LocalOffered,
		remoteAddr:      peerRemoteAddr,
		localPublicAddr: localAddrIpPort,
		network:         webrtcNetwork,
		connString:      fmt.Sprintf("webrtc offer_id %x: %v", dcc.OfferId, regularNetConnPeerConnConnString(netConn)),
		t.logger.WithDefaultLevel(log.Error).Printf("error in handshaking webrtc connection: %v", err)
	if dcc.LocalOffered {
		pc.Discovery = PeerSourceTracker
		pc.Discovery = PeerSourceIncoming
	pc.conn.SetWriteDeadline(time.Time{})
	err = t.runHandshookConn(pc)
		t.logger.WithDefaultLevel(log.Debug).Printf("error running handshook webrtc conn: %v", err)

func (t *Torrent) logRunHandshookConn(pc *PeerConn, logAll bool, level log.Level) {
	err := t.runHandshookConn(pc)
	if err != nil || logAll {
		t.logger.WithDefaultLevel(level).Levelf(log.ErrorLevel(err), "error running handshook conn: %v", err)

func (t *Torrent) runHandshookConnLoggingErr(pc *PeerConn) {
	t.logRunHandshookConn(pc, false, log.Debug)

func (t *Torrent) startWebsocketAnnouncer(u url.URL) torrentTrackerAnnouncer {
	wtc, release := t.cl.websocketTrackers.Get(u.String(), t.infoHash)
	// This needs to run before the Torrent is dropped from the Client, to prevent a new
	// webtorrent.TrackerClient being created for the same info hash before the old one is cleaned
	// up.
	t.onClose = append(t.onClose, release)
	wst := websocketTrackerStatus{u, wtc}
		err := wtc.Announce(tracker.Started, t.infoHash)
			t.logger.WithDefaultLevel(log.Warning).Printf(
				"error in initial announce to %q: %v",

func (t *Torrent) startScrapingTracker(_url string) {
	u, err := url.Parse(_url)
		// URLs with a leading '*' appear to be a uTorrent convention to disable trackers.
		t.logger.Levelf(log.Warning, "error parsing tracker url: %v", err)
	if u.Scheme == "udp" {
		t.startScrapingTracker(u.String())
		t.startScrapingTracker(u.String())
	if _, ok := t.trackerAnnouncers[_url]; ok {
	sl := func() torrentTrackerAnnouncer {
		if t.cl.config.DisableWebtorrent {
			return t.startWebsocketAnnouncer(*u)
		if t.cl.config.DisableIPv4Peers || t.cl.config.DisableIPv4 {
		if t.cl.config.DisableIPv6 {
		newAnnouncer := &trackerScraper{
			lookupTrackerIp: t.cl.config.LookupTrackerIp,
		go newAnnouncer.Run()
	if t.trackerAnnouncers == nil {
		t.trackerAnnouncers = make(map[string]torrentTrackerAnnouncer)
	t.trackerAnnouncers[_url] = sl

// Adds and starts tracker scrapers for tracker URLs that aren't already
// running.
func (t *Torrent) startMissingTrackerScrapers() {
	if t.cl.config.DisableTrackers {
	t.startScrapingTracker(t.metainfo.Announce)
	for _, tier := range t.metainfo.AnnounceList {
		for _, url := range tier {
			t.startScrapingTracker(url)

// Returns an AnnounceRequest with fields filled out to defaults and current
// values.
func (t *Torrent) announceRequest(event tracker.AnnounceEvent) tracker.AnnounceRequest {
	// Note that IPAddress is not set. It's set for UDP inside the tracker code, since it's
	// dependent on the network in use.
	return tracker.AnnounceRequest{
		NumWant: func() int32 {
			if t.wantPeers() && len(t.cl.dialers) > 0 {
				return 200 // Win has UDP packet limit. See: https://github.com/anacrolix/torrent/issues/764
		Port:     uint16(t.cl.incomingPeerPort()),
		PeerId:   t.cl.peerID,
		InfoHash: t.infoHash,
		Key:      t.cl.announceKey(),
		// The following are vaguely described in BEP 3.
		Left:     t.bytesLeftAnnounce(),
		Uploaded: t.stats.BytesWrittenData.Int64(),
		// There's no mention of wasted or unwanted download in the BEP.
		Downloaded: t.stats.BytesReadUsefulData.Int64(),

// Adds peers revealed in an announce until the announce ends, or we have
// enough peers.
func (t *Torrent) consumeDhtAnnouncePeers(pvs <-chan dht.PeersValues) {
	for v := range pvs {
		for _, cp := range v.Peers {
			// Can't do anything with this.
			if t.addPeer(PeerInfo{
				Addr:   ipPortAddr{cp.IP, cp.Port},
				Source: PeerSourceDhtGetPeers,
	// log.Printf("added %v peers from dht for %v", added, t.InfoHash().HexString())

// Announce using the provided DHT server. Peers are consumed automatically. done is closed when the
// announce ends. stop will force the announce to end.
func (t *Torrent) AnnounceToDht(s DhtServer) (done <-chan struct{}, stop func(), err error) {
	ps, err := s.Announce(t.infoHash, t.cl.incomingPeerPort(), true)
	_done := make(chan struct{})
	t.consumeDhtAnnouncePeers(ps.Peers())

func (t *Torrent) timeboxedAnnounceToDht(s DhtServer) error {
	_, stop, err := t.AnnounceToDht(s)
	case <-t.closed.Done():
	case <-time.After(5 * time.Minute):
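
// Usage sketch for the public announce API above (illustrative), assuming s
// is a DhtServer wired into the Client:
//
//	done, stop, err := t.AnnounceToDht(s)
//	if err != nil {
//		return err
//	}
//	defer stop()
//	<-done // Peers are consumed automatically while this runs.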
func (t *Torrent) dhtAnnouncer(s DhtServer) {
	if t.closed.IsSet() {
	// We're also announcing ourselves as a listener, so we don't just want peer addresses.
	// TODO: We can include the announce_peer step depending on whether we can receive
	// inbound connections. We should probably only announce once every 15 mins too.
	if !t.wantAnyConns() {
	// TODO: Determine if there's a listener on the port we're announcing.
	if len(cl.dialers) == 0 && len(cl.listeners) == 0 {
	err := t.timeboxedAnnounceToDht(s)
		t.logger.WithDefaultLevel(log.Warning).Printf("error announcing %q to DHT: %s", t, err)

func (t *Torrent) addPeers(peers []PeerInfo) (added int) {
	for _, p := range peers {

// The returned TorrentStats may require alignment in memory. See
// https://github.com/anacrolix/torrent/issues/383.
func (t *Torrent) Stats() TorrentStats {
	defer t.cl.rUnlock()
	return t.statsLocked()

func (t *Torrent) statsLocked() (ret TorrentStats) {
	ret.ActivePeers = len(t.conns)
	ret.HalfOpenPeers = len(t.halfOpen)
	ret.PendingPeers = t.peers.Len()
	ret.TotalPeers = t.numTotalPeers()
	ret.ConnectedSeeders = 0
	for c := range t.conns {
		if all, ok := c.peerHasAllPieces(); all && ok {
			ret.ConnectedSeeders++
	ret.ConnStats = t.stats.Copy()
	ret.PiecesComplete = t.numPiecesCompleted()

// The total number of peers in the torrent.
func (t *Torrent) numTotalPeers() int {
	peers := make(map[string]struct{})
	for conn := range t.conns {
		ra := conn.conn.RemoteAddr()
			// It's been closed and doesn't support RemoteAddr.
		peers[ra.String()] = struct{}{}
	for addr := range t.halfOpen {
		peers[addr] = struct{}{}
	t.peers.Each(func(peer PeerInfo) {
		peers[peer.Addr.String()] = struct{}{}

// Reconcile bytes transferred before connection was associated with a
// torrent.
func (t *Torrent) reconcileHandshakeStats(c *Peer) {
	if c._stats != (ConnStats{
		// Handshakes should only increment these fields:
		BytesWritten: c._stats.BytesWritten,
		BytesRead:    c._stats.BytesRead,
	c.postHandshakeStats(func(cs *ConnStats) {
		cs.BytesRead.Add(c._stats.BytesRead.Int64())
		cs.BytesWritten.Add(c._stats.BytesWritten.Int64())
	c.reconciledHandshakeStats = true

// Returns true if the connection is added.
func (t *Torrent) addPeerConn(c *PeerConn) (err error) {
	torrent.Add("added connections", 1)
	if t.closed.IsSet() {
		return errors.New("torrent closed")
	for c0 := range t.conns {
		if c.PeerID != c0.PeerID {
		if !t.cl.config.DropDuplicatePeerIds {
		if c.hasPreferredNetworkOver(c0) {
			t.deletePeerConn(c0)
		return errors.New("existing connection preferred")
	if len(t.conns) >= t.maxEstablishedConns {
		numOutgoing := t.numOutgoingConns()
		numIncoming := len(t.conns) - numOutgoing
		c := t.worstBadConn(worseConnLensOpts{
			// We've already established that we have too many connections at this point, so we just
			// need to match what kind we have too many of vs. what we're trying to add now.
			incomingIsBad: (numIncoming-numOutgoing > 1) && c.outgoing,
			outgoingIsBad: (numOutgoing-numIncoming > 1) && !c.outgoing,
		return errors.New("don't want conn")
	if len(t.conns) >= t.maxEstablishedConns {
	t.conns[c] = struct{}{}
	t.cl.event.Broadcast()
	// We'll never receive the "p" extended handshake parameter.
	if !t.cl.config.DisablePEX && !c.PeerExtensionBytes.SupportsExtended() {

func (t *Torrent) newConnsAllowed() bool {
	if !t.networkingEnabled.Bool() {
	if t.closed.IsSet() {
	if !t.needData() && (!t.seeding() || !t.haveAnyPieces()) {

func (t *Torrent) wantAnyConns() bool {
	if !t.networkingEnabled.Bool() {
	if t.closed.IsSet() {
	if !t.needData() && (!t.seeding() || !t.haveAnyPieces()) {
	return len(t.conns) < t.maxEstablishedConns

func (t *Torrent) wantOutgoingConns() bool {
	if !t.newConnsAllowed() {
	if len(t.conns) < t.maxEstablishedConns {
	numIncomingConns := len(t.conns) - t.numOutgoingConns()
	return t.worstBadConn(worseConnLensOpts{
		incomingIsBad: numIncomingConns-t.numOutgoingConns() > 1,
		outgoingIsBad: false,

func (t *Torrent) wantIncomingConns() bool {
	if !t.newConnsAllowed() {
	if len(t.conns) < t.maxEstablishedConns {
	numIncomingConns := len(t.conns) - t.numOutgoingConns()
	return t.worstBadConn(worseConnLensOpts{
		incomingIsBad: false,
		outgoingIsBad: t.numOutgoingConns()-numIncomingConns > 1,

func (t *Torrent) SetMaxEstablishedConns(max int) (oldMax int) {
	oldMax = t.maxEstablishedConns
	t.maxEstablishedConns = max
	wcs := worseConnSlice{
		conns: t.appendConns(nil, func(*PeerConn) bool {
	wcs.initKeys(worseConnLensOpts{})
	for len(t.conns) > t.maxEstablishedConns && wcs.Len() > 0 {
		t.dropConnection(heap.Pop(&wcs).(*PeerConn))
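
// Usage sketch (illustrative): lowering the limit drops the worst surplus
// connections immediately via the heap above.
//
//	old := t.SetMaxEstablishedConns(25)
//	log.Printf("max established conns %v -> 25", old)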
func (t *Torrent) pieceHashed(piece pieceIndex, passed bool, hashIoErr error) {
	t.logger.LazyLog(log.Debug, func() log.Msg {
		return log.Fstr("hashed piece %d (passed=%t)", piece, passed)
	t.cl.event.Broadcast()
	if t.closed.IsSet() {
	// Don't score the first time a piece is hashed; it could be an initial check.
	if p.storageCompletionOk {
		pieceHashedCorrect.Add(1)
			"piece %d failed hash: %d connections contributed", piece, len(p.dirtiers),
		).AddValues(t, p).LogLevel(
			log.Debug, t.logger)
		pieceHashedNotCorrect.Add(1)
		t.publishPieceStateChange(piece)
		t.publishPieceStateChange(piece)
	if len(p.dirtiers) != 0 {
		// Don't increment stats above connection-level for every involved connection.
		t.allStats((*ConnStats).incrementPiecesDirtiedGood)
	for c := range p.dirtiers {
		c._stats.incrementPiecesDirtiedGood()
	t.clearPieceTouchers(piece)
	hasDirty := p.hasDirtyChunks()
	p.Flush() // You can be synchronous here!
	err := p.Storage().MarkComplete()
		t.logger.Levelf(log.Warning, "%T: error marking piece complete %d: %s", t.storage, piece, err)
	if t.closed.IsSet() {
	t.pendAllChunkSpecs(piece)
	if len(p.dirtiers) != 0 && p.allChunksDirty() && hashIoErr == nil {
		// Peers contributed to all the data for this piece hash failure, and the failure was
		// not due to errors in the storage (such as data being dropped in a cache).

		// Increment Torrent and above stats, and then specific connections.
		t.allStats((*ConnStats).incrementPiecesDirtiedBad)
		for c := range p.dirtiers {
			// Y u do dis peer?!
			c.stats().incrementPiecesDirtiedBad()
		bannableTouchers := make([]*Peer, 0, len(p.dirtiers))
		for c := range p.dirtiers {
			bannableTouchers = append(bannableTouchers, c)
		t.clearPieceTouchers(piece)
		slices.Sort(bannableTouchers, connLessTrusted)
		if t.cl.config.Debug {
				"bannable conns by trust for piece %d: %v",
				func() (ret []connectionTrust) {
					for _, c := range bannableTouchers {
						ret = append(ret, c.trust())
		if len(bannableTouchers) >= 1 {
			c := bannableTouchers[0]
			if len(bannableTouchers) != 1 {
				t.logger.Levelf(log.Debug, "would have banned %v for touching piece %v after failed piece check", c.remoteIp(), piece)
				// Turns out it's still useful to ban peers like this because if there's only a
				// single peer for a piece, and we never progress that piece to completion, we
				// will never smart-ban them. Discovered in
				// https://github.com/anacrolix/torrent/issues/715.
				t.logger.Levelf(log.Warning, "banning %v for being sole dirtier of piece %v after failed piece check", c, piece)
	t.onIncompletePiece(piece)
	p.Storage().MarkNotComplete()
	t.updatePieceCompletion(piece)

func (t *Torrent) cancelRequestsForPiece(piece pieceIndex) {
	start := t.pieceRequestIndexOffset(piece)
	end := start + t.pieceNumChunks(piece)
	for ri := start; ri < end; ri++ {

func (t *Torrent) onPieceCompleted(piece pieceIndex) {
	t.pendAllChunkSpecs(piece)
	t.cancelRequestsForPiece(piece)
	t.piece(piece).readerCond.Broadcast()
	for conn := range t.conns {
		t.maybeDropMutuallyCompletePeer(conn)

// Called when a piece is found to be not complete.
func (t *Torrent) onIncompletePiece(piece pieceIndex) {
	if t.pieceAllDirty(piece) {
		t.pendAllChunkSpecs(piece)
	if !t.wantPieceIndex(piece) {
		// t.logger.Printf("piece %d incomplete and unwanted", piece)
	// We could drop any connections here that we told we have a piece that
	// we don't. But there's a test failure, and it seems clients don't care
	// if you request pieces that you already claim to have. Pruning bad
	// connections might just remove any connections that aren't treating us
	// favourably anyway.

	// for c := range t.conns {
	//	if c.sentHave(piece) {
	t.iterPeers(func(conn *Peer) {
		if conn.peerHasPiece(piece) {
			conn.updateRequests("piece incomplete")

func (t *Torrent) tryCreateMorePieceHashers() {
	for !t.closed.IsSet() && t.activePieceHashes < t.cl.config.PieceHashersPerTorrent && t.tryCreatePieceHasher() {

func (t *Torrent) tryCreatePieceHasher() bool {
	if t.storage == nil {
	pi, ok := t.getPieceToHash()
	t.piecesQueuedForHash.Remove(bitmap.BitIndex(pi))
	t.publishPieceStateChange(pi)
	t.updatePiecePriority(pi, "Torrent.tryCreatePieceHasher")
	t.storageLock.RLock()
	t.activePieceHashes++
	go t.pieceHasher(pi)

func (t *Torrent) getPieceToHash() (ret pieceIndex, ok bool) {
	t.piecesQueuedForHash.IterTyped(func(i pieceIndex) bool {
		if t.piece(i).hashing {

func (t *Torrent) dropBannedPeers() {
	t.iterPeers(func(p *Peer) {
		remoteIp := p.remoteIp()
		if remoteIp == nil {
			if p.bannableAddr.Ok {
				t.logger.WithDefaultLevel(log.Debug).Printf("can't get remote ip for peer %v", p)
		netipAddr := netip.MustParseAddr(remoteIp.String())
		if Some(netipAddr) != p.bannableAddr {
			t.logger.WithDefaultLevel(log.Debug).Printf(
				"peer remote ip does not match its bannable addr [peer=%v, remote ip=%v, bannable addr=%v]",
				p, remoteIp, p.bannableAddr)
		if _, ok := t.cl.badPeerIPs[netipAddr]; ok {
			// Should this be a close?
			t.logger.WithDefaultLevel(log.Debug).Printf("dropped %v for banned remote IP %v", p, netipAddr)

func (t *Torrent) pieceHasher(index pieceIndex) {
	sum, failedPeers, copyErr := t.hashPiece(index)
	correct := sum == *p.hash
		log.Fmsg("piece %v (%s) hash failure copy error: %v", p, p.hash.HexString(), copyErr).Log(t.logger)
	t.storageLock.RUnlock()
	for peer := range failedPeers {
		t.cl.banPeerIP(peer.AsSlice())
		t.logger.WithDefaultLevel(log.Debug).Printf("smart banned %v for piece %v", peer, index)
	for ri := t.pieceRequestIndexOffset(index); ri < t.pieceRequestIndexOffset(index+1); ri++ {
		t.smartBanCache.ForgetBlock(ri)
	t.pieceHashed(index, correct, copyErr)
	t.updatePiecePriority(index, "Torrent.pieceHasher")
	t.activePieceHashes--
	t.tryCreateMorePieceHashers()

// Clears the entries for connections that touched a piece, dissociating the piece from each
// connection that contributed to it.
func (t *Torrent) clearPieceTouchers(pi pieceIndex) {
	for c := range p.dirtiers {
		delete(c.peerTouchedPieces, pi)
		delete(p.dirtiers, c)

func (t *Torrent) peersAsSlice() (ret []*Peer) {
	t.iterPeers(func(p *Peer) {
		ret = append(ret, p)

func (t *Torrent) queuePieceCheck(pieceIndex pieceIndex) {
	piece := t.piece(pieceIndex)
	if piece.queuedForHash() {
	t.piecesQueuedForHash.Add(bitmap.BitIndex(pieceIndex))
	t.publishPieceStateChange(pieceIndex)
	t.updatePiecePriority(pieceIndex, "Torrent.queuePieceCheck")
	t.tryCreateMorePieceHashers()

// Forces all the pieces to be re-hashed. See also Piece.VerifyData. This should not be called
// before the Info is available.
func (t *Torrent) VerifyData() {
	for i := pieceIndex(0); i < t.NumPieces(); i++ {
		t.Piece(i).VerifyData()
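
// Usage sketch (illustrative): force a full re-check once the info is
// available, e.g. after resuming with untrusted storage.
//
//	<-t.GotInfo()
//	t.VerifyData()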
2385 func (t *Torrent) connectingToPeerAddr(addrStr string) bool {
2386 return len(t.halfOpen[addrStr]) != 0
2389 func (t *Torrent) hasPeerConnForAddr(x PeerRemoteAddr) bool {
2390 addrStr := x.String()
2391 for c := range t.conns {
2393 if ra.String() == addrStr {
2400 func (t *Torrent) getHalfOpenPath(
2402 attemptKey outgoingConnAttemptKey,
2403 ) nestedmaps.Path[*PeerInfo] {
2404 return nestedmaps.Next(nestedmaps.Next(nestedmaps.Begin(&t.halfOpen), addrStr), attemptKey)
func (t *Torrent) addHalfOpen(addrStr string, attemptKey *PeerInfo) {
	path := t.getHalfOpenPath(addrStr, attemptKey)
	if path.Exists() {
		panic("should be unique")
	}
	path.Set(attemptKey)
	t.cl.numHalfOpen++
}

// Start the process of connecting to the given peer for the given torrent if appropriate. I'm not
// sure all the PeerInfo fields are being used.
func initiateConn(
	opts outgoingConnOpts,
	ignoreLimits bool,
) {
	t := opts.t
	peer := opts.peerInfo
	if peer.Id == t.cl.peerID {
		return
	}
	if t.cl.badPeerAddr(peer.Addr) && !peer.Trusted {
		return
	}
	addr := peer.Addr
	addrStr := addr.String()
	if !ignoreLimits {
		if t.connectingToPeerAddr(addrStr) {
			return
		}
	}
	if t.hasPeerConnForAddr(addr) {
		return
	}
	attemptKey := &peer
	t.addHalfOpen(addrStr, attemptKey)
	go t.cl.outgoingConnection(
		opts,
		attemptKey,
	)
}

// Adds a trusted, pending peer for each of the given Client's addresses. Typically used in tests to
// quickly make one Client visible to the Torrent of another Client.
func (t *Torrent) AddClientPeer(cl *Client) int {
	return t.AddPeers(func() (ps []PeerInfo) {
		for _, la := range cl.ListenAddrs() {
			ps = append(ps, PeerInfo{
				Addr:    la,
				Trusted: true,
			})
		}
		return
	}())
}

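// A minimal test-style sketch, not part of this file: `seeder` and `leecher`
// are hypothetical Clients, and `tor` is a Torrent added to `leecher`.
//
//	tor.AddClientPeer(seeder)
//	<-tor.GotInfo()
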
// All stats that include this Torrent. Useful when we want to increment ConnStats but not for every
// connection.
func (t *Torrent) allStats(f func(*ConnStats)) {
	f(&t.stats)
	f(&t.cl.connStats)
}

func (t *Torrent) hashingPiece(i pieceIndex) bool {
	return t.pieces[i].hashing
}

func (t *Torrent) pieceQueuedForHash(i pieceIndex) bool {
	return t.piecesQueuedForHash.Get(bitmap.BitIndex(i))
}

func (t *Torrent) dialTimeout() time.Duration {
	return reducedDialTimeout(t.cl.config.MinDialTimeout, t.cl.config.NominalDialTimeout, t.cl.config.HalfOpenConnsPerTorrent, t.peers.Len())
}

func (t *Torrent) piece(i int) *Piece {
	return &t.pieces[i]
}

func (t *Torrent) onWriteChunkErr(err error) {
	if t.userOnWriteChunkErr != nil {
		go t.userOnWriteChunkErr(err)
		return
	}
	t.logger.WithDefaultLevel(log.Critical).Printf("default chunk write error handler: disabling data download")
	t.disallowDataDownloadLocked()
}

func (t *Torrent) DisallowDataDownload() {
	t.cl.lock()
	defer t.cl.unlock()
	t.disallowDataDownloadLocked()
}

func (t *Torrent) disallowDataDownloadLocked() {
	t.dataDownloadDisallowed.Set()
	t.iterPeers(func(p *Peer) {
		// Could check if peer request state is empty/not interested?
		p.updateRequests("disallow data download")
		p.cancelAllRequests()
	})
}

func (t *Torrent) AllowDataDownload() {
	t.cl.lock()
	defer t.cl.unlock()
	t.dataDownloadDisallowed.Clear()
	t.iterPeers(func(p *Peer) {
		p.updateRequests("allow data download")
	})
}

// Enables uploading data, if it was disabled.
func (t *Torrent) AllowDataUpload() {
	t.cl.lock()
	defer t.cl.unlock()
	t.dataUploadDisallowed = false
	t.iterPeers(func(p *Peer) {
		p.updateRequests("allow data upload")
	})
}

// Disables uploading data, if it was enabled.
func (t *Torrent) DisallowDataUpload() {
	t.cl.lock()
	defer t.cl.unlock()
	t.dataUploadDisallowed = true
	for c := range t.conns {
		// TODO: This doesn't look right. Shouldn't we tickle writers to choke peers or something instead?
		c.updateRequests("disallow data upload")
	}
}

// Sets a handler that is called if there's an error writing a chunk to local storage. By default,
// or if f is nil, a critical message is logged and data download is disabled.
func (t *Torrent) SetOnWriteChunkError(f func(error)) {
	t.cl.lock()
	defer t.cl.unlock()
	t.userOnWriteChunkErr = f
}

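// A minimal usage sketch, not part of this file: override the default
// behaviour (which disables download) with a custom handler. `tor` is a
// hypothetical Torrent.
//
//	tor.SetOnWriteChunkError(func(err error) {
//		log.Printf("storage write failed: %v", err)
//	})
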
func (t *Torrent) iterPeers(f func(p *Peer)) {
	for pc := range t.conns {
		f(&pc.Peer)
	}
	for _, ws := range t.webSeeds {
		f(ws)
	}
}

func (t *Torrent) callbacks() *Callbacks {
	return &t.cl.config.Callbacks
}

type AddWebSeedsOpt func(*webseed.Client)

// Sets the WebSeed trailing path escaper for a webseed.Client.
func WebSeedPathEscaper(custom webseed.PathEscaper) AddWebSeedsOpt {
	return func(c *webseed.Client) {
		c.PathEscaper = custom
	}
}

func (t *Torrent) AddWebSeeds(urls []string, opts ...AddWebSeedsOpt) {
	t.cl.lock()
	defer t.cl.unlock()
	for _, u := range urls {
		t.addWebSeed(u, opts...)
	}
}

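// A minimal usage sketch, not part of this file, assuming webseed.PathEscaper
// has the shape func(pathComps []string) string; the URL and escaper are
// illustrative only.
//
//	tor.AddWebSeeds(
//		[]string{"https://example.com/files/"},
//		WebSeedPathEscaper(func(pathComps []string) string {
//			return path.Join(pathComps...)
//		}),
//	)
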
func (t *Torrent) addWebSeed(url string, opts ...AddWebSeedsOpt) {
	if t.cl.config.DisableWebseeds {
		return
	}
	if _, ok := t.webSeeds[url]; ok {
		return
	}
	// I don't think Go http supports pipelining requests. However, we can have more requests ready
	// to go right away. This value should be some multiple of the number of connections to a host.
	// I would expect that double maxRequests plus a bit would be appropriate. This value is based
	// on downloading Sintel (08ada5a7a6183aae1e09d831df6748d566095a10) from
	// "https://webtorrent.io/torrents/".
	const maxRequests = 16
	ws := webseedPeer{
		peer: Peer{
			t:                        t,
			outgoing:                 true,
			Network:                  "http",
			reconciledHandshakeStats: true,
			// This should affect how often we have to recompute requests for this peer. Note that
			// because we can request more than 1 thing at a time over HTTP, we will hit the low
			// requests mark more often, so recomputation is probably sooner than with regular peer
			// conns. ~4x maxRequests would be about right.
			PeerMaxRequests: 128,
			// TODO: Set ban prefix?
			RemoteAddr: remoteAddrFromUrl(url),
			callbacks:  t.callbacks(),
		},
		client: webseed.Client{
			HttpClient: t.cl.httpClient,
			Url:        url,
			ResponseBodyWrapper: func(r io.Reader) io.Reader {
				return &rateLimitedReader{
					l: t.cl.config.DownloadRateLimiter,
					r: r,
				}
			},
		},
		activeRequests: make(map[Request]webseed.Request, maxRequests),
	}
	ws.peer.initRequestState()
	for _, opt := range opts {
		opt(&ws.client)
	}
	ws.peer.initUpdateRequestsTimer()
	ws.requesterCond.L = t.cl.locker()
	for i := 0; i < maxRequests; i += 1 {
		go ws.requester(i)
	}
	for _, f := range t.callbacks().NewPeer {
		f(&ws.peer)
	}
	ws.peer.logger = t.logger.WithContextValue(&ws)
	ws.peer.peerImpl = &ws
	if t.haveInfo() {
		ws.onGotInfo(t.info)
	}
	t.webSeeds[url] = &ws.peer
}

func (t *Torrent) peerIsActive(p *Peer) (active bool) {
	t.iterPeers(func(p1 *Peer) {
		if p1 == p {
			active = true
		}
	})
	return
}

func (t *Torrent) requestIndexToRequest(ri RequestIndex) Request {
	index := t.pieceIndexOfRequestIndex(ri)
	return Request{
		pp.Integer(index),
		t.piece(index).chunkIndexSpec(ri % t.chunksPerRegularPiece()),
	}
}

func (t *Torrent) requestIndexFromRequest(r Request) RequestIndex {
	return t.pieceRequestIndexOffset(pieceIndex(r.Index)) + RequestIndex(r.Begin/t.chunkSize)
}

func (t *Torrent) pieceRequestIndexOffset(piece pieceIndex) RequestIndex {
	return RequestIndex(piece) * t.chunksPerRegularPiece()
}

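// A worked example of the mapping above, with illustrative numbers: with
// 16 KiB chunks and 256 KiB pieces, chunksPerRegularPiece is 16, so piece 3
// spans request indices 48..63, and Request{Index: 3, Begin: 32 KiB} maps to
// request index 48 + 2 = 50. requestIndexToRequest is the inverse.
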
func (t *Torrent) updateComplete() {
	t.Complete.SetBool(t.haveAllPieces())
}

func (t *Torrent) cancelRequest(r RequestIndex) *Peer {
	p := t.requestingPeer(r)
	if p != nil {
		p.cancel(r)
	}
	// TODO: This is a check that an old invariant holds. It can be removed after some testing.
	//delete(t.pendingRequests, r)
	if _, ok := t.requestState[r]; ok {
		panic("expected request state to be gone")
	}
	return p
}

func (t *Torrent) requestingPeer(r RequestIndex) *Peer {
	return t.requestState[r].peer
}

func (t *Torrent) addConnWithAllPieces(p *Peer) {
	if t.connsWithAllPieces == nil {
		t.connsWithAllPieces = make(map[*Peer]struct{}, t.maxEstablishedConns)
	}
	t.connsWithAllPieces[p] = struct{}{}
}

func (t *Torrent) deleteConnWithAllPieces(p *Peer) bool {
	_, ok := t.connsWithAllPieces[p]
	delete(t.connsWithAllPieces, p)
	return ok
}

func (t *Torrent) numActivePeers() int {
	return len(t.conns) + len(t.webSeeds)
}

func (t *Torrent) hasStorageCap() bool {
	f := t.storage.Capacity
	if f == nil {
		return false
	}
	_, ok := (*f)()
	return ok
}

func (t *Torrent) pieceIndexOfRequestIndex(ri RequestIndex) pieceIndex {
	return pieceIndex(ri / t.chunksPerRegularPiece())
}

func (t *Torrent) iterUndirtiedRequestIndexesInPiece(
	reuseIter *typedRoaring.Iterator[RequestIndex],
	piece pieceIndex,
	f func(RequestIndex),
) {
	reuseIter.Initialize(&t.dirtyChunks)
	pieceRequestIndexOffset := t.pieceRequestIndexOffset(piece)
	iterBitmapUnsetInRange(
		reuseIter,
		pieceRequestIndexOffset, pieceRequestIndexOffset+t.pieceNumChunks(piece),
		f,
	)
}

type requestState struct {
	peer *Peer
	when time.Time
}

// Returns an error if a received chunk is out of bounds in some way.
func (t *Torrent) checkValidReceiveChunk(r Request) error {
	if !t.haveInfo() {
		return errors.New("torrent missing info")
	}
	if int(r.Index) >= t.numPieces() {
		return fmt.Errorf("chunk index %v, torrent num pieces %v", r.Index, t.numPieces())
	}
	pieceLength := t.pieceLength(pieceIndex(r.Index))
	if r.Begin >= pieceLength {
		return fmt.Errorf("chunk begins beyond end of piece (%v >= %v)", r.Begin, pieceLength)
	}
	// We could check chunk lengths here, but chunk request size is not changed often, and it's
	// tricky for peers to manipulate as they need to send potentially large buffers to begin with.
	// There should be considerable checks elsewhere for this case due to the network overhead. We
	// should catch most of the overflow manipulation stuff by checking index and begin above.
	return nil
}

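// For example, with illustrative numbers: given 2 pieces of length 256 KiB,
// Request{Index: 2, Begin: 0} fails the index check, and
// Request{Index: 1, Begin: 256 KiB} fails the begin check; chunk Length is
// deliberately not validated here, per the comment above.
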
func (t *Torrent) peerConnsWithDialAddrPort(target netip.AddrPort) (ret []*PeerConn) {
	for pc := range t.conns {
		dialAddr, err := pc.remoteDialAddrPort()
		if err != nil {
			continue
		}
		if dialAddr != target {
			continue
		}
		ret = append(ret, pc)
	}
	return
}

func wrapUtHolepunchMsgForPeerConn(
	recipient *PeerConn,
	msg utHolepunch.Msg,
) pp.Message {
	extendedPayload, err := msg.MarshalBinary()
	if err != nil {
		panic(err)
	}
	return pp.Message{
		Type:            pp.Extended,
		ExtendedID:      MapMustGet(recipient.PeerExtensionIDs, utHolepunch.ExtensionName),
		ExtendedPayload: extendedPayload,
	}
}

func sendUtHolepunchMsg(
	pc *PeerConn,
	msgType utHolepunch.MsgType,
	addrPort netip.AddrPort,
	errCode utHolepunch.ErrCode,
) {
	holepunchMsg := utHolepunch.Msg{
		MsgType:  msgType,
		AddrPort: addrPort,
		ErrCode:  errCode,
	}
	incHolepunchMessagesSent(holepunchMsg)
	ppMsg := wrapUtHolepunchMsgForPeerConn(pc, holepunchMsg)
	pc.write(ppMsg)
}

func incHolepunchMessages(msg utHolepunch.Msg, verb string) {
	torrent.Add(
		fmt.Sprintf(
			"holepunch %v %v messages %v",
			msg.MsgType,
			addrPortProtocolStr(msg.AddrPort),
			verb,
		),
		1,
	)
}

func incHolepunchMessagesReceived(msg utHolepunch.Msg) {
	incHolepunchMessages(msg, "received")
}

func incHolepunchMessagesSent(msg utHolepunch.Msg) {
	incHolepunchMessages(msg, "sent")
}

func (t *Torrent) handleReceivedUtHolepunchMsg(msg utHolepunch.Msg, sender *PeerConn) error {
	incHolepunchMessagesReceived(msg)
	switch msg.MsgType {
	case utHolepunch.Rendezvous:
		t.logger.Printf("got holepunch rendezvous request for %v from %p", msg.AddrPort, sender)
		sendMsg := sendUtHolepunchMsg
		senderAddrPort, err := sender.remoteDialAddrPort()
		if err != nil {
			sender.logger.Levelf(
				log.Debug,
				"error getting ut_holepunch rendezvous sender's dial address: %v",
				err,
			)
			// There's no better error code. The sender's address itself is invalid. I don't see
			// this error message being appropriate anywhere else anyway.
			sendMsg(sender, utHolepunch.Error, msg.AddrPort, utHolepunch.NoSuchPeer)
			return err
		}
		targets := t.peerConnsWithDialAddrPort(msg.AddrPort)
		if len(targets) == 0 {
			sendMsg(sender, utHolepunch.Error, msg.AddrPort, utHolepunch.NotConnected)
			return nil
		}
		for _, pc := range targets {
			if !pc.supportsExtension(utHolepunch.ExtensionName) {
				sendMsg(sender, utHolepunch.Error, msg.AddrPort, utHolepunch.NoSupport)
				continue
			}
			sendMsg(sender, utHolepunch.Connect, msg.AddrPort, 0)
			sendMsg(pc, utHolepunch.Connect, senderAddrPort, 0)
		}
		return nil
	case utHolepunch.Connect:
		holepunchAddr := msg.AddrPort
		t.logger.Printf("got holepunch connect request for %v from %p", holepunchAddr, sender)
		if g.MapContains(t.cl.undialableWithoutHolepunch, holepunchAddr) {
			setAdd(&t.cl.undialableWithoutHolepunchDialedAfterHolepunchConnect, holepunchAddr)
			if g.MapContains(t.cl.accepted, holepunchAddr) {
				setAdd(&t.cl.probablyOnlyConnectedDueToHolepunch, holepunchAddr)
			}
		}
		opts := outgoingConnOpts{
			peerInfo: PeerInfo{
				Addr:         msg.AddrPort,
				Source:       PeerSourceUtHolepunch,
				PexPeerFlags: sender.pex.remoteLiveConns[msg.AddrPort].UnwrapOrZeroValue(),
			},
			t: t,
			// Don't attempt to start our own rendezvous if we fail to connect.
			skipHolepunchRendezvous:  true,
			receivedHolepunchConnect: true,
			// Assume that the other end initiated the rendezvous, and will use our preferred
			// encryption. So we will act normally.
			HeaderObfuscationPolicy: t.cl.config.HeaderObfuscationPolicy,
		}
		initiateConn(opts, true)
		return nil
	case utHolepunch.Error:
		torrent.Add("holepunch error messages received", 1)
		t.logger.Levelf(log.Debug, "received ut_holepunch error message from %v: %v", sender, msg.ErrCode)
		return nil
	default:
		return fmt.Errorf("unhandled msg type %v", msg.MsgType)
	}
}

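// To summarize the exchange implemented above (the BEP 55-style flow): a peer
// behind NAT sends Rendezvous to a mutually connected relay; the relay replies
// Connect to both endpoints, each carrying the other's dial address, and both
// sides then dial simultaneously. Error goes back to the initiator when the
// relay cannot help (NoSuchPeer, NotConnected, NoSupport).
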
func addrPortProtocolStr(addrPort netip.AddrPort) string {
	addr := addrPort.Addr()
	if addr.Is4() {
		return "ipv4"
	}
	if addr.Is6() {
		return "ipv6"
	}
	panic(addrPort)
}

func (t *Torrent) trySendHolepunchRendezvous(addrPort netip.AddrPort) error {
	rzsSent := 0
	for pc := range t.conns {
		if !pc.supportsExtension(utHolepunch.ExtensionName) {
			continue
		}
		if pc.supportsExtension(pp.ExtensionNamePex) {
			if !g.MapContains(pc.pex.remoteLiveConns, addrPort) {
				continue
			}
		}
		t.logger.Levelf(log.Debug, "sent ut_holepunch rendezvous message to %v for %v", pc, addrPort)
		sendUtHolepunchMsg(pc, utHolepunch.Rendezvous, addrPort, 0)
		rzsSent++
	}
	if rzsSent == 0 {
		return errors.New("no eligible relays")
	}
	return nil
}

func (t *Torrent) numHalfOpenAttempts() (num int) {
	for _, attempts := range t.halfOpen {
		num += len(attempts)
	}
	return
}

func (t *Torrent) getDialTimeoutUnlocked() time.Duration {
	t.cl.rLock()
	defer t.cl.rUnlock()
	return t.dialTimeout()
}