20 "github.com/RoaringBitmap/roaring"
21 "github.com/anacrolix/chansync"
22 "github.com/anacrolix/dht/v2"
23 "github.com/anacrolix/log"
24 "github.com/anacrolix/missinggo/perf"
25 "github.com/anacrolix/missinggo/pubsub"
26 "github.com/anacrolix/missinggo/slices"
27 "github.com/anacrolix/missinggo/v2"
28 "github.com/anacrolix/missinggo/v2/bitmap"
29 "github.com/anacrolix/missinggo/v2/prioritybitmap"
30 "github.com/anacrolix/multiless"
31 "github.com/anacrolix/sync"
32 "github.com/davecgh/go-spew/spew"
33 "github.com/pion/datachannel"
35 "github.com/anacrolix/torrent/bencode"
36 "github.com/anacrolix/torrent/common"
37 "github.com/anacrolix/torrent/metainfo"
38 pp "github.com/anacrolix/torrent/peer_protocol"
39 "github.com/anacrolix/torrent/segments"
40 "github.com/anacrolix/torrent/storage"
41 "github.com/anacrolix/torrent/tracker"
42 "github.com/anacrolix/torrent/webseed"
43 "github.com/anacrolix/torrent/webtorrent"
46 // Maintains state of torrent within a Client. Many methods should not be called before the info is
47 // available, see .Info and .GotInfo.
49 // Torrent-level aggregate statistics. First in struct to ensure 64-bit
50 // alignment. See #262.
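// (Per the sync/atomic documentation, on 32-bit platforms the 64-bit atomic
// functions require the caller to arrange 64-bit alignment of the operand,
// and the first word of an allocated struct is guaranteed to be so aligned;
// keeping the aggregate stats first satisfies that.)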
55 networkingEnabled chansync.Flag
56 dataDownloadDisallowed chansync.Flag
57 dataUploadDisallowed bool
58 userOnWriteChunkErr func(error)
60 closed chansync.SetOnce
61 infoHash metainfo.Hash
63 // Values are the piece indices that changed.
64 pieceStateChanges *pubsub.PubSub
65 // The size of chunks to request from peers over the wire. This is
66 // normally 16KiB by convention these days.
69 // Total length of the torrent in bytes. Stored because it's not O(1) to
70 // get this from the info dict.
73 // The storage to open when the info dict becomes available.
74 storageOpener *storage.Client
75 // Storage for torrent data.
76 storage *storage.Torrent
77 // Read-locked for using storage, and write-locked for Closing.
78 storageLock sync.RWMutex
80 // TODO: Only announce stuff is used?
81 metainfo metainfo.MetaInfo
83 // The info dict. nil if we don't have it (yet).
85 fileIndex segments.Index
88 webSeeds map[string]*Peer
90 // Active peer connections, running message stream loops. TODO: Make this
91 // open (not-closed) connections only.
92 conns map[*PeerConn]struct{}
93 maxEstablishedConns int
94 // Set of addrs to which we're attempting to connect. Connections are
95 // half-open until all handshakes are completed.
96 halfOpen map[string]PeerInfo
98 // Reserve of peers to connect to. A peer can be both here and in the
99 // active connections if we're told about the peer after connecting with
100 // them. That encourages us to reconnect to peers that are well known in
102 peers prioritizedPeers
103 // Whether we want to know more peers.
104 wantPeersEvent missinggo.Event
105 // An announcer for each tracker URL.
106 trackerAnnouncers map[string]torrentTrackerAnnouncer
107 // How many times we've initiated a DHT announce. TODO: Move into stats.
110 // Name used if the info name isn't available. Should be cleared when the
111 // Info does become available.
115 // The bencoded bytes of the info dict. This is actively manipulated if
116 // the info bytes aren't initially available, and we try to fetch them
119 // Each element corresponds to a 16KiB metadata piece. If true, we have
120 // received that piece.
121 metadataCompletedChunks []bool
122 metadataChanged sync.Cond
124 // Closed when .Info is obtained.
125 gotMetainfoC chan struct{}
127 readers map[*reader]struct{}
128 _readerNowPieces bitmap.Bitmap
129 _readerReadaheadPieces bitmap.Bitmap
131 // A cache of pieces we need to get. Calculated from various piece and
132 // file priorities and completion states elsewhere.
133 _pendingPieces prioritybitmap.PriorityBitmap
134 // A cache of completed piece indices.
135 _completedPieces roaring.Bitmap
136 // Pieces that need to be hashed.
137 piecesQueuedForHash bitmap.Bitmap
138 activePieceHashes int
140 // A pool of piece priorities []int for assignment to new connections.
141 // These "inclinations" are used to give connections preference for
143 connPieceInclinationPool sync.Pool
145 // Count of each request across active connections.
146 pendingRequests map[RequestIndex]int
151 func (t *Torrent) pieceAvailabilityFromPeers(i pieceIndex) (count int) {
152 t.iterPeers(func(peer *Peer) {
153 if peer.peerHasPiece(i) {
160 func (t *Torrent) decPieceAvailability(i pieceIndex) {
165 if p.availability <= 0 {
166 panic(p.availability)
171 func (t *Torrent) incPieceAvailability(i pieceIndex) {
172 // If we don't have the info, this should be reconciled when we do.
179 func (t *Torrent) readerNowPieces() bitmap.Bitmap {
180 return t._readerNowPieces
183 func (t *Torrent) readerReadaheadPieces() bitmap.Bitmap {
184 return t._readerReadaheadPieces
187 func (t *Torrent) ignorePieceForRequests(i pieceIndex) bool {
188 return !t.wantPieceIndex(i)
191 func (t *Torrent) pendingPieces() *prioritybitmap.PriorityBitmap {
192 return &t._pendingPieces
195 // Returns a channel that is closed when the Torrent is closed.
196 func (t *Torrent) Closed() chansync.Done {
197 return t.closed.Done()
200 // KnownSwarm returns the known subset of the peers in the Torrent's swarm, including active,
201 // pending, and half-open peers.
202 func (t *Torrent) KnownSwarm() (ks []PeerInfo) {
203 // Add pending peers to the list
204 t.peers.Each(func(peer PeerInfo) {
205 ks = append(ks, peer)
208 // Add half-open peers to the list
209 for _, peer := range t.halfOpen {
210 ks = append(ks, peer)
213 // Add active peers to the list
214 for conn := range t.conns {
216 ks = append(ks, PeerInfo{
218 Addr: conn.RemoteAddr,
219 Source: conn.Discovery,
220 // > If the connection is encrypted, that's certainly enough to set SupportsEncryption.
221 // > But if we're not connected to them with an encrypted connection, I couldn't say
222 // > what's appropriate. We can carry forward the SupportsEncryption value as we
223 // > received it from trackers/DHT/PEX, or just use the encryption state for the
224 // > connection. It's probably easiest to do the latter for now.
225 // https://github.com/anacrolix/torrent/pull/188
226 SupportsEncryption: conn.headerEncrypted,
233 func (t *Torrent) setChunkSize(size pp.Integer) {
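// Chunk-sized buffers are pooled so that handling chunk data doesn't require
// a fresh allocation each time; the pool's New func below sizes each buffer
// to the configured chunk size.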
235 t.chunkPool = sync.Pool{
236 New: func() interface{} {
237 b := make([]byte, size)
243 func (t *Torrent) pieceComplete(piece pieceIndex) bool {
244 return t._completedPieces.Contains(bitmap.BitIndex(piece))
247 func (t *Torrent) pieceCompleteUncached(piece pieceIndex) storage.Completion {
248 return t.pieces[piece].Storage().Completion()
251 // There's a connection to that address already.
252 func (t *Torrent) addrActive(addr string) bool {
253 if _, ok := t.halfOpen[addr]; ok {
256 for c := range t.conns {
258 if ra.String() == addr {
265 func (t *Torrent) appendUnclosedConns(ret []*PeerConn) []*PeerConn {
266 for c := range t.conns {
267 if !c.closed.IsSet() {
274 func (t *Torrent) addPeer(p PeerInfo) (added bool) {
276 torrent.Add(fmt.Sprintf("peers added by source %q", p.Source), 1)
277 if t.closed.IsSet() {
280 if ipAddr, ok := tryIpPortFromNetAddr(p.Addr); ok {
281 if cl.badPeerIPPort(ipAddr.IP, ipAddr.Port) {
282 torrent.Add("peers not added because of bad addr", 1)
283 // cl.logger.Printf("peers not added because of bad addr: %v", p)
287 if replaced, ok := t.peers.AddReturningReplacedPeer(p); ok {
288 torrent.Add("peers replaced", 1)
289 if !replaced.equal(p) {
290 t.logger.WithDefaultLevel(log.Debug).Printf("added %v replacing %v", p, replaced)
297 for t.peers.Len() > cl.config.TorrentPeersHighWater {
298 _, ok := t.peers.DeleteMin()
300 torrent.Add("excess reserve peers discarded", 1)
306 func (t *Torrent) invalidateMetadata() {
307 for i := 0; i < len(t.metadataCompletedChunks); i++ {
308 t.metadataCompletedChunks[i] = false
311 t.gotMetainfoC = make(chan struct{})
316 func (t *Torrent) saveMetadataPiece(index int, data []byte) {
320 if index >= len(t.metadataCompletedChunks) {
321 t.logger.Printf("%s: ignoring metadata piece %d", t, index)
324 copy(t.metadataBytes[(1<<14)*index:], data)
325 t.metadataCompletedChunks[index] = true
328 func (t *Torrent) metadataPieceCount() int {
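// Metadata is exchanged in 16KiB (1<<14) pieces, so this is a ceiling
// division. For example, 100000 bytes of metadata would give
// (100000 + 16383) / 16384 = 7 pieces: six full pieces plus one partial.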
329 return (len(t.metadataBytes) + (1 << 14) - 1) / (1 << 14)
332 func (t *Torrent) haveMetadataPiece(piece int) bool {
334 return (1<<14)*piece < len(t.metadataBytes)
336 return piece < len(t.metadataCompletedChunks) && t.metadataCompletedChunks[piece]
340 func (t *Torrent) metadataSize() int {
341 return len(t.metadataBytes)
344 func infoPieceHashes(info *metainfo.Info) (ret [][]byte) {
345 for i := 0; i < len(info.Pieces); i += sha1.Size {
346 ret = append(ret, info.Pieces[i:i+sha1.Size])
351 func (t *Torrent) makePieces() {
352 hashes := infoPieceHashes(t.info)
353 t.pieces = make([]Piece, len(hashes))
354 for i, hash := range hashes {
355 piece := &t.pieces[i]
357 piece.index = pieceIndex(i)
358 piece.noPendingWrites.L = &piece.pendingWritesMutex
359 piece.hash = (*metainfo.Hash)(unsafe.Pointer(&hash[0]))
361 beginFile := pieceFirstFileIndex(piece.torrentBeginOffset(), files)
362 endFile := pieceEndFileIndex(piece.torrentEndOffset(), files)
363 piece.files = files[beginFile:endFile]
367 // Returns the index of the first file containing the piece. files must be
368 // ordered by offset.
369 func pieceFirstFileIndex(pieceOffset int64, files []*File) int {
370 for i, f := range files {
371 if f.offset+f.length > pieceOffset {
378 // Returns the index after the last file containing the piece. files must be
379 // ordered by offset.
380 func pieceEndFileIndex(pieceEndOffset int64, files []*File) int {
381 for i, f := range files {
382 if f.offset+f.length >= pieceEndOffset {
389 func (t *Torrent) cacheLength() {
391 for _, f := range t.info.UpvertedFiles() {
397 // TODO: This shouldn't fail for storage reasons. Instead we should handle storage failure
399 func (t *Torrent) setInfo(info *metainfo.Info) error {
400 if err := validateInfo(info); err != nil {
401 return fmt.Errorf("bad info: %s", err)
403 if t.storageOpener != nil {
405 t.storage, err = t.storageOpener.OpenTorrent(info, t.infoHash)
407 return fmt.Errorf("error opening torrent storage: %s", err)
413 t.fileIndex = segments.NewIndex(common.LengthIterFromUpvertedFiles(info.UpvertedFiles()))
414 t.displayName = "" // Save a few bytes lol.
421 // This seems to be all the follow-up tasks after the info is set that can't fail.
422 func (t *Torrent) onSetInfo() {
423 t.iterPeers(func(p *Peer) {
426 for i := range t.pieces {
428 // Need to add availability before updating piece completion, as that may result in conns
430 if p.availability != 0 {
431 panic(p.availability)
433 p.availability = int64(t.pieceAvailabilityFromPeers(i))
434 t.updatePieceCompletion(pieceIndex(i))
435 if !p.storageCompletionOk {
436 // t.logger.Printf("piece %s completion unknown, queueing check", p)
437 t.queuePieceCheck(pieceIndex(i))
440 t.cl.event.Broadcast()
441 close(t.gotMetainfoC)
442 t.updateWantPeersEvent()
443 t.pendingRequests = make(map[RequestIndex]int)
444 t.tryCreateMorePieceHashers()
447 // Called when metadata for a torrent becomes available.
448 func (t *Torrent) setInfoBytesLocked(b []byte) error {
449 if metainfo.HashBytes(b) != t.infoHash {
450 return errors.New("info bytes have wrong hash")
452 var info metainfo.Info
453 if err := bencode.Unmarshal(b, &info); err != nil {
454 return fmt.Errorf("error unmarshalling info bytes: %s", err)
457 t.metadataCompletedChunks = nil
461 if err := t.setInfo(&info); err != nil {
468 func (t *Torrent) haveAllMetadataPieces() bool {
472 if t.metadataCompletedChunks == nil {
475 for _, have := range t.metadataCompletedChunks {
483 // TODO: Propagate errors to disconnect peer.
484 func (t *Torrent) setMetadataSize(size int) (err error) {
486 // We already know the correct metadata size.
489 if uint32(size) > maxMetadataSize {
490 return errors.New("bad size")
492 if len(t.metadataBytes) == size {
495 t.metadataBytes = make([]byte, size)
496 t.metadataCompletedChunks = make([]bool, (size+(1<<14)-1)/(1<<14))
497 t.metadataChanged.Broadcast()
498 for c := range t.conns {
499 c.requestPendingMetadata()
504 // The current working name for the torrent. Either the name in the info dict,
505 // or a display name given, such as by the dn value in a magnet link, or "".
506 func (t *Torrent) name() string {
508 defer t.nameMu.RUnlock()
512 if t.displayName != "" {
515 return "infohash:" + t.infoHash.HexString()
518 func (t *Torrent) pieceState(index pieceIndex) (ret PieceState) {
519 p := &t.pieces[index]
520 ret.Priority = t.piecePriority(index)
521 ret.Completion = p.completion()
522 ret.QueuedForHash = p.queuedForHash()
523 ret.Hashing = p.hashing
524 ret.Checking = ret.QueuedForHash || ret.Hashing
525 ret.Marking = p.marking
526 if !ret.Complete && t.piecePartiallyDownloaded(index) {
532 func (t *Torrent) metadataPieceSize(piece int) int {
533 return metadataPieceSize(len(t.metadataBytes), piece)
536 func (t *Torrent) newMetadataExtensionMessage(c *PeerConn, msgType pp.ExtendedMetadataRequestMsgType, piece int, data []byte) pp.Message {
539 ExtendedID: c.PeerExtensionIDs[pp.ExtensionNameMetadata],
540 ExtendedPayload: append(bencode.MustMarshal(pp.ExtendedMetadataRequestMsg{
542 TotalSize: len(t.metadataBytes),
548 type pieceAvailabilityRun struct {
553 func (me pieceAvailabilityRun) String() string {
554 return fmt.Sprintf("%v(%v)", me.count, me.availability)
557 func (t *Torrent) pieceAvailabilityRuns() (ret []pieceAvailabilityRun) {
558 rle := missinggo.NewRunLengthEncoder(func(el interface{}, count uint64) {
559 ret = append(ret, pieceAvailabilityRun{availability: el.(int64), count: int(count)})
561 for i := range t.pieces {
562 rle.Append(t.pieces[i].availability, 1)
568 func (t *Torrent) pieceStateRuns() (ret PieceStateRuns) {
569 rle := missinggo.NewRunLengthEncoder(func(el interface{}, count uint64) {
570 ret = append(ret, PieceStateRun{
571 PieceState: el.(PieceState),
575 for index := range t.pieces {
576 rle.Append(t.pieceState(pieceIndex(index)), 1)
582 // Produces a small string representing a PieceStateRun.
583 func (psr PieceStateRun) String() (ret string) {
584 ret = fmt.Sprintf("%d", psr.Length)
585 ret += func() string {
586 switch psr.Priority {
587 case PiecePriorityNext:
589 case PiecePriorityNormal:
591 case PiecePriorityReadahead:
593 case PiecePriorityNow:
595 case PiecePriorityHigh:
604 if psr.QueuedForHash {
622 func (t *Torrent) writeStatus(w io.Writer) {
623 fmt.Fprintf(w, "Infohash: %s\n", t.infoHash.HexString())
624 fmt.Fprintf(w, "Metadata length: %d\n", t.metadataSize())
626 fmt.Fprintf(w, "Metadata have: ")
627 for _, h := range t.metadataCompletedChunks {
628 fmt.Fprintf(w, "%c", func() rune {
638 fmt.Fprintf(w, "Piece length: %s\n",
641 return fmt.Sprintf("%v (%v chunks)",
643 float64(t.usualPieceSize())/float64(t.chunkSize))
650 fmt.Fprintf(w, "Num Pieces: %d (%d completed)\n", t.numPieces(), t.numPiecesCompleted())
651 fmt.Fprintf(w, "Piece States: %s\n", t.pieceStateRuns())
652 fmt.Fprintf(w, "Piece availability: %v\n", strings.Join(func() (ret []string) {
653 for _, run := range t.pieceAvailabilityRuns() {
654 ret = append(ret, run.String())
659 fmt.Fprintf(w, "Reader Pieces:")
660 t.forReaderOffsetPieces(func(begin, end pieceIndex) (again bool) {
661 fmt.Fprintf(w, " %d:%d", begin, end)
666 fmt.Fprintf(w, "Enabled trackers:\n")
668 tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
669 fmt.Fprintf(tw, " URL\tExtra\n")
670 for _, ta := range slices.Sort(slices.FromMapElems(t.trackerAnnouncers), func(l, r torrentTrackerAnnouncer) bool {
673 var luns, runs url.URL = *lu, *ru
676 var ml missinggo.MultiLess
677 ml.StrictNext(luns.String() == runs.String(), luns.String() < runs.String())
678 ml.StrictNext(lu.String() == ru.String(), lu.String() < ru.String())
680 }).([]torrentTrackerAnnouncer) {
681 fmt.Fprintf(tw, " %q\t%v\n", ta.URL(), ta.statusLine())
686 fmt.Fprintf(w, "DHT Announces: %d\n", t.numDHTAnnounces)
688 spew.NewDefaultConfig()
689 spew.Fdump(w, t.statsLocked())
691 peers := t.peersAsSlice()
692 sort.Slice(peers, func(_i, _j int) bool {
695 if less, ok := multiless.New().EagerSameLess(
696 i.downloadRate() == j.downloadRate(), i.downloadRate() < j.downloadRate(),
700 return worseConn(i, j)
702 for i, c := range peers {
703 fmt.Fprintf(w, "%2d. ", i+1)
708 func (t *Torrent) haveInfo() bool {
712 // Returns a run-time generated MetaInfo that includes the info bytes and
713 // announce-list as currently known to the client.
714 func (t *Torrent) newMetaInfo() metainfo.MetaInfo {
715 return metainfo.MetaInfo{
716 CreationDate: time.Now().Unix(),
717 Comment: "dynamic metainfo from client",
718 CreatedBy: "go.torrent",
719 AnnounceList: t.metainfo.UpvertedAnnounceList().Clone(),
720 InfoBytes: func() []byte {
722 return t.metadataBytes
727 UrlList: func() []string {
728 ret := make([]string, 0, len(t.webSeeds))
729 for url := range t.webSeeds {
730 ret = append(ret, url)
737 func (t *Torrent) BytesMissing() int64 {
740 return t.bytesMissingLocked()
743 func (t *Torrent) bytesMissingLocked() int64 {
747 func iterFlipped(b *roaring.Bitmap, end uint64, cb func(uint32) bool) {
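// roaring.Flip returns a new bitmap with the bits in [0, end) negated, so
// iterating it visits the indices in that range that are absent from b.
// bytesLeft below uses this to walk the pieces that aren't yet complete.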
748 roaring.Flip(b, 0, end).Iterate(cb)
751 func (t *Torrent) bytesLeft() (left int64) {
752 iterFlipped(&t._completedPieces, uint64(t.numPieces()), func(x uint32) bool {
753 p := t.piece(pieceIndex(x))
754 left += int64(p.length() - p.numDirtyBytes())
760 // Bytes left to give in tracker announces.
761 func (t *Torrent) bytesLeftAnnounce() int64 {
769 func (t *Torrent) piecePartiallyDownloaded(piece pieceIndex) bool {
770 if t.pieceComplete(piece) {
773 if t.pieceAllDirty(piece) {
776 return t.pieces[piece].hasDirtyChunks()
779 func (t *Torrent) usualPieceSize() int {
780 return int(t.info.PieceLength)
783 func (t *Torrent) numPieces() pieceIndex {
784 return pieceIndex(t.info.NumPieces())
787 func (t *Torrent) numPiecesCompleted() (num pieceIndex) {
788 return pieceIndex(t._completedPieces.GetCardinality())
791 func (t *Torrent) close(wg *sync.WaitGroup) (err error) {
793 if t.storage != nil {
798 defer t.storageLock.Unlock()
799 if f := t.storage.Close; f != nil {
802 t.logger.WithDefaultLevel(log.Warning).Printf("error closing storage: %v", err1)
807 t.iterPeers(func(p *Peer) {
811 t.cl.event.Broadcast()
812 t.pieceStateChanges.Close()
813 t.updateWantPeersEvent()
817 func (t *Torrent) requestOffset(r Request) int64 {
818 return torrentRequestOffset(*t.length, int64(t.usualPieceSize()), r)
821 // Return the request that would include the given offset into the torrent data. Returns !ok if
822 // there is no such request.
823 func (t *Torrent) offsetRequest(off int64) (req Request, ok bool) {
824 return torrentOffsetRequest(*t.length, t.info.PieceLength, int64(t.chunkSize), off)
827 func (t *Torrent) writeChunk(piece int, begin int64, data []byte) (err error) {
828 defer perf.ScopeTimerErr(&err)()
829 n, err := t.pieces[piece].Storage().WriteAt(data, begin)
830 if err == nil && n != len(data) {
831 err = io.ErrShortWrite
836 func (t *Torrent) bitfield() (bf []bool) {
837 bf = make([]bool, t.numPieces())
838 t._completedPieces.Iterate(func(piece uint32) (again bool) {
845 func (t *Torrent) chunksPerRegularPiece() uint32 {
846 return uint32((pp.Integer(t.usualPieceSize()) + t.chunkSize - 1) / t.chunkSize)
849 func (t *Torrent) pieceNumChunks(piece pieceIndex) chunkIndexType {
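// Ceiling division. For example, with the conventional 16KiB chunks, a
// 256KiB piece has (262144 + 16383) / 16384 = 16 chunks, and a trailing
// 40000-byte final piece has (40000 + 16383) / 16384 = 3 chunks.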
850 return chunkIndexType((t.pieceLength(piece) + t.chunkSize - 1) / t.chunkSize)
853 func (t *Torrent) pendAllChunkSpecs(pieceIndex pieceIndex) {
854 t.pieces[pieceIndex]._dirtyChunks.Clear()
857 func (t *Torrent) pieceLength(piece pieceIndex) pp.Integer {
858 if t.info.PieceLength == 0 {
859 // There will be no variance amongst pieces. Only pain.
862 if piece == t.numPieces()-1 {
863 ret := pp.Integer(*t.length % t.info.PieceLength)
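// Illustrative numbers: a 1000000-byte torrent with 262144-byte pieces has
// four pieces, and the final one is 1000000 % 262144 = 213568 bytes. When
// the remainder is 0 the final piece is a full-length piece.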
868 return pp.Integer(t.info.PieceLength)
871 func (t *Torrent) hashPiece(piece pieceIndex) (ret metainfo.Hash, err error) {
873 p.waitNoPendingWrites()
874 storagePiece := t.pieces[piece].Storage()
876 // Does the backend want to do its own hashing?
877 if i, ok := storagePiece.PieceImpl.(storage.SelfHashing); ok {
878 var sum metainfo.Hash
879 //log.Printf("A piece decided to self-hash: %d", piece)
880 sum, err = i.SelfHash()
881 missinggo.CopyExact(&ret, sum)
885 hash := pieceHash.New()
886 const logPieceContents = false
887 if logPieceContents {
888 var examineBuf bytes.Buffer
889 _, err = storagePiece.WriteTo(io.MultiWriter(hash, &examineBuf))
890 log.Printf("hashed %q with copy err %v", examineBuf.Bytes(), err)
892 _, err = storagePiece.WriteTo(hash)
894 missinggo.CopyExact(&ret, hash.Sum(nil))
898 func (t *Torrent) haveAnyPieces() bool {
899 return t._completedPieces.GetCardinality() != 0
902 func (t *Torrent) haveAllPieces() bool {
906 return t._completedPieces.GetCardinality() == bitmap.BitRange(t.numPieces())
909 func (t *Torrent) havePiece(index pieceIndex) bool {
910 return t.haveInfo() && t.pieceComplete(index)
913 func (t *Torrent) maybeDropMutuallyCompletePeer(
914 // I'm not sure about taking peer here, not all peer implementations actually drop. Maybe that's okay?
917 if !t.cl.config.DropMutuallyCompletePeers {
920 if !t.haveAllPieces() {
923 if all, known := p.peerHasAllPieces(); !(known && all) {
929 t.logger.WithDefaultLevel(log.Debug).Printf("dropping %v, which is mutually complete", p)
933 func (t *Torrent) haveChunk(r Request) (ret bool) {
935 // log.Println("have chunk", r, ret)
940 if t.pieceComplete(pieceIndex(r.Index)) {
943 p := &t.pieces[r.Index]
944 return !p.pendingChunk(r.ChunkSpec, t.chunkSize)
947 func chunkIndexFromChunkSpec(cs ChunkSpec, chunkSize pp.Integer) chunkIndexType {
948 return chunkIndexType(cs.Begin / chunkSize)
951 func (t *Torrent) wantPieceIndex(index pieceIndex) bool {
952 // TODO: Are these overly conservative, should we be guarding this here?
957 if index < 0 || index >= t.numPieces() {
961 p := &t.pieces[index]
962 if p.queuedForHash() {
968 if t.pieceComplete(index) {
971 if t._pendingPieces.Contains(int(index)) {
974 // t.logger.Printf("piece %d not pending", index)
975 return !t.forReaderOffsetPieces(func(begin, end pieceIndex) bool {
976 return index < begin || index >= end
980 // A pool of []*PeerConn, to reduce allocations in functions that need to index or sort Torrent
981 // conns (which is a map).
982 var peerConnSlices sync.Pool
984 // The worst connection is one that hasn't sent, or been sent, anything useful for the longest. A bad
985 // connection is one that usually sends us unwanted pieces, or has been in the worse half of the
986 // established connections for more than a minute. This is O(n log n). If there was a way to not
987 // consider the position of a conn relative to the total number, it could be reduced to O(n).
988 func (t *Torrent) worstBadConn() (ret *PeerConn) {
990 getInterface := peerConnSlices.Get()
991 if getInterface == nil {
992 sl = make([]*PeerConn, 0, len(t.conns))
994 sl = getInterface.([]*PeerConn)[:0]
996 sl = t.appendUnclosedConns(sl)
997 defer peerConnSlices.Put(sl)
998 wcs := worseConnSlice{sl}
1000 for wcs.Len() != 0 {
1001 c := heap.Pop(&wcs).(*PeerConn)
1002 if c._stats.ChunksReadWasted.Int64() >= 6 && c._stats.ChunksReadWasted.Int64() > c._stats.ChunksReadUseful.Int64() {
1005 // If the connection is in the worst half of the established
1006 // connection quota and is older than a minute.
1007 if wcs.Len() >= (t.maxEstablishedConns+1)/2 {
1008 // Give connections 1 minute to prove themselves.
1009 if time.Since(c.completedHandshake) > time.Minute {
1017 type PieceStateChange struct {
1022 func (t *Torrent) publishPieceChange(piece pieceIndex) {
1023 t.cl._mu.Defer(func() {
1024 cur := t.pieceState(piece)
1025 p := &t.pieces[piece]
1026 if cur != p.publicPieceState {
1027 p.publicPieceState = cur
1028 t.pieceStateChanges.Publish(PieceStateChange{
1036 func (t *Torrent) pieceNumPendingChunks(piece pieceIndex) pp.Integer {
1037 if t.pieceComplete(piece) {
1040 return pp.Integer(t.pieceNumChunks(piece)) - t.pieces[piece].numDirtyChunks()
1043 func (t *Torrent) pieceAllDirty(piece pieceIndex) bool {
1044 return t.pieces[piece]._dirtyChunks.Len() == bitmap.BitRange(t.pieceNumChunks(piece))
1047 func (t *Torrent) readersChanged() {
1048 t.updateReaderPieces()
1049 t.updateAllPiecePriorities()
1052 func (t *Torrent) updateReaderPieces() {
1053 t._readerNowPieces, t._readerReadaheadPieces = t.readerPiecePriorities()
1056 func (t *Torrent) readerPosChanged(from, to pieceRange) {
1060 t.updateReaderPieces()
1061 // Order the ranges, high and low.
1063 if l.begin > h.begin {
1066 if l.end < h.begin {
1067 // Two distinct ranges.
1068 t.updatePiecePriorities(l.begin, l.end)
1069 t.updatePiecePriorities(h.begin, h.end)
1076 t.updatePiecePriorities(l.begin, end)
1080 func (t *Torrent) maybeNewConns() {
1081 // Tickle the accept routine.
1082 t.cl.event.Broadcast()
1086 func (t *Torrent) piecePriorityChanged(piece pieceIndex) {
1087 // t.logger.Printf("piece %d priority changed", piece)
1088 t.iterPeers(func(c *Peer) {
1089 if c.updatePiecePriority(piece) {
1090 // log.Print("conn piece priority changed")
1095 t.publishPieceChange(piece)
1098 func (t *Torrent) updatePiecePriority(piece pieceIndex) {
1099 p := &t.pieces[piece]
1100 newPrio := p.uncachedPriority()
1101 // t.logger.Printf("torrent %p: piece %d: uncached priority: %v", t, piece, newPrio)
1102 if newPrio == PiecePriorityNone {
1103 if !t._pendingPieces.Remove(int(piece)) {
1107 if !t._pendingPieces.Set(int(piece), newPrio.BitmapPriority()) {
1111 t.piecePriorityChanged(piece)
1114 func (t *Torrent) updateAllPiecePriorities() {
1115 t.updatePiecePriorities(0, t.numPieces())
1118 // Update all piece priorities in one hit. This function should have the same
1119 // output as updatePiecePriority, but across all pieces.
1120 func (t *Torrent) updatePiecePriorities(begin, end pieceIndex) {
1121 for i := begin; i < end; i++ {
1122 t.updatePiecePriority(i)
1126 // Returns the range of pieces [begin, end) that contains the extent of bytes.
1127 func (t *Torrent) byteRegionPieces(off, size int64) (begin, end pieceIndex) {
1128 if off >= *t.length {
1138 begin = pieceIndex(off / t.info.PieceLength)
1139 end = pieceIndex((off + size + t.info.PieceLength - 1) / t.info.PieceLength)
1140 if end > pieceIndex(t.info.NumPieces()) {
1141 end = pieceIndex(t.info.NumPieces())
1146 // Returns true if all iterations complete without breaking. Passes the read
1147 // region of each reader to f. The reader regions should not be merged, as some
1148 // callers depend on this method to enumerate readers.
1149 func (t *Torrent) forReaderOffsetPieces(f func(begin, end pieceIndex) (more bool)) (all bool) {
1150 for r := range t.readers {
1152 if p.begin >= p.end {
1155 if !f(p.begin, p.end) {
1162 func (t *Torrent) piecePriority(piece pieceIndex) piecePriority {
1163 prio, ok := t._pendingPieces.GetPriority(piece)
1165 return PiecePriorityNone
1170 ret := piecePriority(-prio)
1171 if ret == PiecePriorityNone {
1177 func (t *Torrent) pendRequest(req RequestIndex) {
1178 t.piece(int(req / t.chunksPerRegularPiece())).pendChunkIndex(req % t.chunksPerRegularPiece())
1181 func (t *Torrent) pieceCompletionChanged(piece pieceIndex) {
1182 t.cl.event.Broadcast()
1183 if t.pieceComplete(piece) {
1184 t.onPieceCompleted(piece)
1186 t.onIncompletePiece(piece)
1188 t.updatePiecePriority(piece)
1191 func (t *Torrent) numReceivedConns() (ret int) {
1192 for c := range t.conns {
1193 if c.Discovery == PeerSourceIncoming {
1200 func (t *Torrent) maxHalfOpen() int {
1201 // Note that if we somehow exceed the maximum established conns, we want
1202 // the negative value to have an effect.
1203 establishedHeadroom := int64(t.maxEstablishedConns - len(t.conns))
1204 extraIncoming := int64(t.numReceivedConns() - t.maxEstablishedConns/2)
1205 // We want to allow some experimentation with new peers, and to try to
1206 // upset an oversupply of received connections.
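// Illustrative numbers: with maxEstablishedConns of 50, 40 current conns,
// and 30 of them incoming, establishedHeadroom is 10 and extraIncoming is
// 30-25 = 5, giving min(max(5, 5)+10, HalfOpenConnsPerTorrent) = min(15, limit).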
1207 return int(min(max(5, extraIncoming)+establishedHeadroom, int64(t.cl.config.HalfOpenConnsPerTorrent)))
1210 func (t *Torrent) openNewConns() (initiated int) {
1211 defer t.updateWantPeersEvent()
1212 for t.peers.Len() != 0 {
1216 if len(t.halfOpen) >= t.maxHalfOpen() {
1219 if len(t.cl.dialers) == 0 {
1222 if t.cl.numHalfOpen >= t.cl.config.TotalHalfOpenConns {
1225 p := t.peers.PopMax()
1232 func (t *Torrent) getConnPieceInclination() []int {
1233 _ret := t.connPieceInclinationPool.Get()
1235 pieceInclinationsNew.Add(1)
1236 return rand.Perm(int(t.numPieces()))
1238 pieceInclinationsReused.Add(1)
1239 return *_ret.(*[]int)
1242 func (t *Torrent) putPieceInclination(pi []int) {
1243 t.connPieceInclinationPool.Put(&pi)
1244 pieceInclinationsPut.Add(1)
1247 func (t *Torrent) updatePieceCompletion(piece pieceIndex) bool {
1249 uncached := t.pieceCompleteUncached(piece)
1250 cached := p.completion()
1251 changed := cached != uncached
1252 complete := uncached.Complete
1253 p.storageCompletionOk = uncached.Ok
1256 t._completedPieces.Add(x)
1258 t._completedPieces.Remove(x)
1260 if complete && len(p.dirtiers) != 0 {
1261 t.logger.Printf("marked piece %v complete but still has dirtiers", piece)
1264 log.Fstr("piece %d completion changed: %+v -> %+v", piece, cached, uncached).SetLevel(log.Debug).Log(t.logger)
1265 t.pieceCompletionChanged(piece)
1270 // Non-blocking read. Client lock is not required.
1271 func (t *Torrent) readAt(b []byte, off int64) (n int, err error) {
1273 p := &t.pieces[off/t.info.PieceLength]
1274 p.waitNoPendingWrites()
1276 n1, err = p.Storage().ReadAt(b, off-p.Info().Offset())
1287 // Returns an error if the metadata was completed, but couldn't be set for some reason. Blame it on
1288 // the last peer to contribute. TODO: Actually we shouldn't blame peers for failure to open storage
1289 // etc. Also we should probably cache metadata pieces per-Peer, to isolate failure appropriately.
1290 func (t *Torrent) maybeCompleteMetadata() error {
1295 if !t.haveAllMetadataPieces() {
1296 // Don't have enough metadata pieces.
1299 err := t.setInfoBytesLocked(t.metadataBytes)
1301 t.invalidateMetadata()
1302 return fmt.Errorf("error setting info bytes: %s", err)
1304 if t.cl.config.Debug {
1305 t.logger.Printf("%s: got metadata from peers", t)
1310 func (t *Torrent) readerPiecePriorities() (now, readahead bitmap.Bitmap) {
1311 t.forReaderOffsetPieces(func(begin, end pieceIndex) bool {
1313 now.Add(bitmap.BitIndex(begin))
1314 readahead.AddRange(bitmap.BitRange(begin)+1, bitmap.BitRange(end))
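// The piece at the reader's current position goes into "now"; the rest of
// its readahead window, if any, goes into "readahead".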
1321 func (t *Torrent) needData() bool {
1322 if t.closed.IsSet() {
1328 return t._pendingPieces.Len() != 0
1331 func appendMissingStrings(old, new []string) (ret []string) {
1334 for _, n := range new {
1335 for _, o := range old {
1340 ret = append(ret, n)
1345 func appendMissingTrackerTiers(existing [][]string, minNumTiers int) (ret [][]string) {
1347 for minNumTiers > len(ret) {
1348 ret = append(ret, nil)
1353 func (t *Torrent) addTrackers(announceList [][]string) {
1354 fullAnnounceList := &t.metainfo.AnnounceList
1355 t.metainfo.AnnounceList = appendMissingTrackerTiers(*fullAnnounceList, len(announceList))
1356 for tierIndex, trackerURLs := range announceList {
1357 (*fullAnnounceList)[tierIndex] = appendMissingStrings((*fullAnnounceList)[tierIndex], trackerURLs)
1359 t.startMissingTrackerScrapers()
1360 t.updateWantPeersEvent()
1363 // Don't call this before the info is available.
1364 func (t *Torrent) bytesCompleted() int64 {
1368 return *t.length - t.bytesLeft()
1371 func (t *Torrent) SetInfoBytes(b []byte) (err error) {
1374 return t.setInfoBytesLocked(b)
1377 // Returns true if the connection is removed from the torrent's conns.
1378 func (t *Torrent) deletePeerConn(c *PeerConn) (ret bool) {
1379 if !c.closed.IsSet() {
1380 panic("connection is not closed")
1381 // There are behaviours prevented by the closed state that will fail
1382 // if the connection has been deleted.
1386 // Avoid adding a drop event more than once. Probably we should track whether we've generated
1387 // the drop event against the PexConnState instead.
1389 if !t.cl.config.DisablePEX {
1393 torrent.Add("deleted connections", 1)
1394 c.deleteAllRequests()
1395 if t.numActivePeers() == 0 {
1396 t.assertNoPendingRequests()
1401 func (t *Torrent) decPeerPieceAvailability(p *Peer) {
1405 p.newPeerPieces().IterTyped(func(i int) bool {
1406 p.t.decPieceAvailability(i)
1411 func (t *Torrent) numActivePeers() (num int) {
1412 t.iterPeers(func(*Peer) {
1418 func (t *Torrent) assertNoPendingRequests() {
1419 if len(t.pendingRequests) != 0 {
1420 panic(t.pendingRequests)
1422 //if len(t.lastRequested) != 0 {
1423 // panic(t.lastRequested)
1427 func (t *Torrent) dropConnection(c *PeerConn) {
1428 t.cl.event.Broadcast()
1430 if t.deletePeerConn(c) {
1435 func (t *Torrent) wantPeers() bool {
1436 if t.closed.IsSet() {
1439 if t.peers.Len() > t.cl.config.TorrentPeersLowWater {
1442 return t.needData() || t.seeding()
1445 func (t *Torrent) updateWantPeersEvent() {
1447 t.wantPeersEvent.Set()
1449 t.wantPeersEvent.Clear()
1453 // Returns whether the client should make an effort to seed the torrent.
1454 func (t *Torrent) seeding() bool {
1456 if t.closed.IsSet() {
1459 if t.dataUploadDisallowed {
1462 if cl.config.NoUpload {
1465 if !cl.config.Seed {
1468 if cl.config.DisableAggressiveUpload && t.needData() {
1474 func (t *Torrent) onWebRtcConn(
1475 c datachannel.ReadWriteCloser,
1476 dcc webtorrent.DataChannelContext,
1479 pc, err := t.cl.initiateProtocolHandshakes(
1480 context.Background(),
1481 webrtcNetConn{c, dcc},
1485 webrtcNetAddr{dcc.Remote},
1487 fmt.Sprintf("webrtc offer_id %x", dcc.OfferId),
1490 t.logger.WithDefaultLevel(log.Error).Printf("error in handshaking webrtc connection: %v", err)
1493 if dcc.LocalOffered {
1494 pc.Discovery = PeerSourceTracker
1496 pc.Discovery = PeerSourceIncoming
1500 err = t.cl.runHandshookConn(pc, t)
1502 t.logger.WithDefaultLevel(log.Critical).Printf("error running handshook webrtc conn: %v", err)
1506 func (t *Torrent) logRunHandshookConn(pc *PeerConn, logAll bool, level log.Level) {
1507 err := t.cl.runHandshookConn(pc, t)
1508 if err != nil || logAll {
1509 t.logger.WithDefaultLevel(level).Printf("error running handshook conn: %v", err)
1513 func (t *Torrent) runHandshookConnLoggingErr(pc *PeerConn) {
1514 t.logRunHandshookConn(pc, false, log.Debug)
1517 func (t *Torrent) startWebsocketAnnouncer(u url.URL) torrentTrackerAnnouncer {
1518 wtc, release := t.cl.websocketTrackers.Get(u.String())
1523 wst := websocketTrackerStatus{u, wtc}
1525 err := wtc.Announce(tracker.Started, t.infoHash)
1527 t.logger.WithDefaultLevel(log.Warning).Printf(
1528 "error in initial announce to %q: %v",
1537 func (t *Torrent) startScrapingTracker(_url string) {
1541 u, err := url.Parse(_url)
1543 // URLs with a leading '*' appear to be a uTorrent convention to
1544 // disable trackers.
1546 log.Str("error parsing tracker url").AddValues("url", _url).Log(t.logger)
1550 if u.Scheme == "udp" {
1552 t.startScrapingTracker(u.String())
1554 t.startScrapingTracker(u.String())
1557 if _, ok := t.trackerAnnouncers[_url]; ok {
1560 sl := func() torrentTrackerAnnouncer {
1563 if t.cl.config.DisableWebtorrent {
1566 return t.startWebsocketAnnouncer(*u)
1568 if t.cl.config.DisableIPv4Peers || t.cl.config.DisableIPv4 {
1572 if t.cl.config.DisableIPv6 {
1576 newAnnouncer := &trackerScraper{
1580 go newAnnouncer.Run()
1586 if t.trackerAnnouncers == nil {
1587 t.trackerAnnouncers = make(map[string]torrentTrackerAnnouncer)
1589 t.trackerAnnouncers[_url] = sl
1592 // Adds and starts tracker scrapers for tracker URLs that aren't already
1594 func (t *Torrent) startMissingTrackerScrapers() {
1595 if t.cl.config.DisableTrackers {
1598 t.startScrapingTracker(t.metainfo.Announce)
1599 for _, tier := range t.metainfo.AnnounceList {
1600 for _, url := range tier {
1601 t.startScrapingTracker(url)
1606 // Returns an AnnounceRequest with fields filled out to defaults and current
1608 func (t *Torrent) announceRequest(event tracker.AnnounceEvent) tracker.AnnounceRequest {
1609 // Note that IPAddress is not set. It's set for UDP inside the tracker code, since it's
1610 // dependent on the network in use.
1611 return tracker.AnnounceRequest{
1613 NumWant: func() int32 {
1614 if t.wantPeers() && len(t.cl.dialers) > 0 {
1620 Port: uint16(t.cl.incomingPeerPort()),
1621 PeerId: t.cl.peerID,
1622 InfoHash: t.infoHash,
1623 Key: t.cl.announceKey(),
1625 // The following are vaguely described in BEP 3.
1627 Left: t.bytesLeftAnnounce(),
1628 Uploaded: t.stats.BytesWrittenData.Int64(),
1629 // There's no mention of wasted or unwanted download in the BEP.
1630 Downloaded: t.stats.BytesReadUsefulData.Int64(),
1634 // Adds peers revealed in an announce until the announce ends, or we have
1636 func (t *Torrent) consumeDhtAnnouncePeers(pvs <-chan dht.PeersValues) {
1638 for v := range pvs {
1641 for _, cp := range v.Peers {
1643 // Can't do anything with this.
1646 if t.addPeer(PeerInfo{
1647 Addr: ipPortAddr{cp.IP, cp.Port},
1648 Source: PeerSourceDhtGetPeers,
1655 // log.Printf("added %v peers from dht for %v", added, t.InfoHash().HexString())
1660 // Announce using the provided DHT server. Peers are consumed automatically. done is closed when the
1661 // announce ends. stop will force the announce to end.
1662 func (t *Torrent) AnnounceToDht(s DhtServer) (done <-chan struct{}, stop func(), err error) {
1663 ps, err := s.Announce(t.infoHash, t.cl.incomingPeerPort(), true)
1667 _done := make(chan struct{})
1671 t.consumeDhtAnnouncePeers(ps.Peers())
1677 func (t *Torrent) timeboxedAnnounceToDht(s DhtServer) error {
1678 _, stop, err := t.AnnounceToDht(s)
1683 case <-t.closed.Done():
1684 case <-time.After(5 * time.Minute):
1690 func (t *Torrent) dhtAnnouncer(s DhtServer) {
1696 if t.closed.IsSet() {
1702 // TODO: Determine if there's a listener on the port we're announcing.
1703 if len(cl.dialers) == 0 && len(cl.listeners) == 0 {
1714 err := t.timeboxedAnnounceToDht(s)
1716 t.logger.WithDefaultLevel(log.Warning).Printf("error announcing %q to DHT: %s", t, err)
1722 func (t *Torrent) addPeers(peers []PeerInfo) (added int) {
1723 for _, p := range peers {
1731 // The returned TorrentStats may require alignment in memory. See
1732 // https://github.com/anacrolix/torrent/issues/383.
1733 func (t *Torrent) Stats() TorrentStats {
1735 defer t.cl.rUnlock()
1736 return t.statsLocked()
1739 func (t *Torrent) statsLocked() (ret TorrentStats) {
1740 ret.ActivePeers = len(t.conns)
1741 ret.HalfOpenPeers = len(t.halfOpen)
1742 ret.PendingPeers = t.peers.Len()
1743 ret.TotalPeers = t.numTotalPeers()
1744 ret.ConnectedSeeders = 0
1745 for c := range t.conns {
1746 if all, ok := c.peerHasAllPieces(); all && ok {
1747 ret.ConnectedSeeders++
1750 ret.ConnStats = t.stats.Copy()
1754 // The total number of peers in the torrent.
1755 func (t *Torrent) numTotalPeers() int {
1756 peers := make(map[string]struct{})
1757 for conn := range t.conns {
1758 ra := conn.conn.RemoteAddr()
1760 // It's been closed and doesn't support RemoteAddr.
1763 peers[ra.String()] = struct{}{}
1765 for addr := range t.halfOpen {
1766 peers[addr] = struct{}{}
1768 t.peers.Each(func(peer PeerInfo) {
1769 peers[peer.Addr.String()] = struct{}{}
1774 // Reconcile bytes transferred before connection was associated with a
1776 func (t *Torrent) reconcileHandshakeStats(c *PeerConn) {
1777 if c._stats != (ConnStats{
1778 // Handshakes should only increment these fields:
1779 BytesWritten: c._stats.BytesWritten,
1780 BytesRead: c._stats.BytesRead,
1784 c.postHandshakeStats(func(cs *ConnStats) {
1785 cs.BytesRead.Add(c._stats.BytesRead.Int64())
1786 cs.BytesWritten.Add(c._stats.BytesWritten.Int64())
1788 c.reconciledHandshakeStats = true
1791 // Returns an error if the connection is not added.
1792 func (t *Torrent) addPeerConn(c *PeerConn) (err error) {
1795 torrent.Add("added connections", 1)
1798 if t.closed.IsSet() {
1799 return errors.New("torrent closed")
1801 for c0 := range t.conns {
1802 if c.PeerID != c0.PeerID {
1805 if !t.cl.config.DropDuplicatePeerIds {
1808 if left, ok := c.hasPreferredNetworkOver(c0); ok && left {
1810 t.deletePeerConn(c0)
1812 return errors.New("existing connection preferred")
1815 if len(t.conns) >= t.maxEstablishedConns {
1816 c := t.worstBadConn()
1818 return errors.New("don't want conns")
1823 if len(t.conns) >= t.maxEstablishedConns {
1826 t.conns[c] = struct{}{}
1827 if !t.cl.config.DisablePEX && !c.PeerExtensionBytes.SupportsExtended() {
1828 t.pex.Add(c) // as no further extended handshake expected
1833 func (t *Torrent) wantConns() bool {
1834 if !t.networkingEnabled.Bool() {
1837 if t.closed.IsSet() {
1840 if !t.seeding() && !t.needData() {
1843 if len(t.conns) < t.maxEstablishedConns {
1846 return t.worstBadConn() != nil
1849 func (t *Torrent) SetMaxEstablishedConns(max int) (oldMax int) {
1852 oldMax = t.maxEstablishedConns
1853 t.maxEstablishedConns = max
1854 wcs := slices.HeapInterface(slices.FromMapKeys(t.conns), func(l, r *PeerConn) bool {
1855 return worseConn(&l.Peer, &r.Peer)
1857 for len(t.conns) > t.maxEstablishedConns && wcs.Len() > 0 {
1858 t.dropConnection(wcs.Pop().(*PeerConn))
1864 func (t *Torrent) pieceHashed(piece pieceIndex, passed bool, hashIoErr error) {
1865 t.logger.Log(log.Fstr("hashed piece %d (passed=%t)", piece, passed).SetLevel(log.Debug))
1868 t.cl.event.Broadcast()
1869 if t.closed.IsSet() {
1873 // Don't score the first time a piece is hashed, it could be an initial check.
1874 if p.storageCompletionOk {
1876 pieceHashedCorrect.Add(1)
1879 "piece %d failed hash: %d connections contributed", piece, len(p.dirtiers),
1880 ).AddValues(t, p).SetLevel(log.Debug).Log(t.logger)
1881 pieceHashedNotCorrect.Add(1)
1886 t.publishPieceChange(piece)
1889 t.publishPieceChange(piece)
1893 if len(p.dirtiers) != 0 {
1894 // Don't increment stats above connection-level for every involved connection.
1895 t.allStats((*ConnStats).incrementPiecesDirtiedGood)
1897 for c := range p.dirtiers {
1898 c._stats.incrementPiecesDirtiedGood()
1900 t.clearPieceTouchers(piece)
1902 err := p.Storage().MarkComplete()
1904 t.logger.Printf("%T: error marking piece complete %d: %s", t.storage, piece, err)
1908 if t.closed.IsSet() {
1911 t.pendAllChunkSpecs(piece)
1913 if len(p.dirtiers) != 0 && p.allChunksDirty() && hashIoErr == nil {
1914 // Peers contributed to all the data for this piece hash failure, and the failure was
1915 // not due to errors in the storage (such as data being dropped in a cache).
1917 // Increment Torrent and above stats, and then specific connections.
1918 t.allStats((*ConnStats).incrementPiecesDirtiedBad)
1919 for c := range p.dirtiers {
1920 // Y u do dis peer?!
1921 c.stats().incrementPiecesDirtiedBad()
1924 bannableTouchers := make([]*Peer, 0, len(p.dirtiers))
1925 for c := range p.dirtiers {
1927 bannableTouchers = append(bannableTouchers, c)
1930 t.clearPieceTouchers(piece)
1931 slices.Sort(bannableTouchers, connLessTrusted)
1933 if t.cl.config.Debug {
1935 "bannable conns by trust for piece %d: %v",
1937 func() (ret []connectionTrust) {
1938 for _, c := range bannableTouchers {
1939 ret = append(ret, c.trust())
1946 if len(bannableTouchers) >= 1 {
1947 c := bannableTouchers[0]
1948 t.cl.banPeerIP(c.remoteIp())
1952 t.onIncompletePiece(piece)
1953 p.Storage().MarkNotComplete()
1955 t.updatePieceCompletion(piece)
1958 func (t *Torrent) cancelRequestsForPiece(piece pieceIndex) {
1959 // TODO: Make faster
1960 for cn := range t.conns {
1965 func (t *Torrent) onPieceCompleted(piece pieceIndex) {
1966 t.pendAllChunkSpecs(piece)
1967 t.cancelRequestsForPiece(piece)
1968 t.piece(piece).readerCond.Broadcast()
1969 for conn := range t.conns {
1971 t.maybeDropMutuallyCompletePeer(&conn.Peer)
1975 // Called when a piece is found to be not complete.
1976 func (t *Torrent) onIncompletePiece(piece pieceIndex) {
1977 if t.pieceAllDirty(piece) {
1978 t.pendAllChunkSpecs(piece)
1980 if !t.wantPieceIndex(piece) {
1981 // t.logger.Printf("piece %d incomplete and unwanted", piece)
1984 // We could drop any connections here that we've told we have a piece
1985 // that we actually don't. But there's a test failure, and it seems clients
1986 // don't care if you request pieces that you already claim to have. Pruning
1987 // bad connections might just remove any connections that aren't treating
1988 // us favourably anyway.
1990 // for c := range t.conns {
1991 // if c.sentHave(piece) {
1995 t.iterPeers(func(conn *Peer) {
1996 if conn.peerHasPiece(piece) {
1997 conn.updateRequests()
2002 func (t *Torrent) tryCreateMorePieceHashers() {
2003 for !t.closed.IsSet() && t.activePieceHashes < 2 && t.tryCreatePieceHasher() {
2007 func (t *Torrent) tryCreatePieceHasher() bool {
2008 if t.storage == nil {
2011 pi, ok := t.getPieceToHash()
2016 t.piecesQueuedForHash.Remove(bitmap.BitIndex(pi))
2018 t.publishPieceChange(pi)
2019 t.updatePiecePriority(pi)
2020 t.storageLock.RLock()
2021 t.activePieceHashes++
2022 go t.pieceHasher(pi)
2026 func (t *Torrent) getPieceToHash() (ret pieceIndex, ok bool) {
2027 t.piecesQueuedForHash.IterTyped(func(i pieceIndex) bool {
2028 if t.piece(i).hashing {
2038 func (t *Torrent) pieceHasher(index pieceIndex) {
2040 sum, copyErr := t.hashPiece(index)
2041 correct := sum == *p.hash
2045 log.Fmsg("piece %v (%s) hash failure copy error: %v", p, p.hash.HexString(), copyErr).Log(t.logger)
2047 t.storageLock.RUnlock()
2051 t.updatePiecePriority(index)
2052 t.pieceHashed(index, correct, copyErr)
2053 t.publishPieceChange(index)
2054 t.activePieceHashes--
2055 t.tryCreateMorePieceHashers()
2058 // Forget which connections touched a piece, clearing the entries on both the piece and each peer.
2059 func (t *Torrent) clearPieceTouchers(pi pieceIndex) {
2061 for c := range p.dirtiers {
2062 delete(c.peerTouchedPieces, pi)
2063 delete(p.dirtiers, c)
2067 func (t *Torrent) peersAsSlice() (ret []*Peer) {
2068 t.iterPeers(func(p *Peer) {
2069 ret = append(ret, p)
2074 func (t *Torrent) queuePieceCheck(pieceIndex pieceIndex) {
2075 piece := t.piece(pieceIndex)
2076 if piece.queuedForHash() {
2079 t.piecesQueuedForHash.Add(bitmap.BitIndex(pieceIndex))
2080 t.publishPieceChange(pieceIndex)
2081 t.updatePiecePriority(pieceIndex)
2082 t.tryCreateMorePieceHashers()
2085 // Forces all the pieces to be re-hashed. See also Piece.VerifyData. This should not be called
2086 // before the Info is available.
2087 func (t *Torrent) VerifyData() {
2088 for i := pieceIndex(0); i < t.NumPieces(); i++ {
2089 t.Piece(i).VerifyData()
2093 // Start the process of connecting to the given peer for the given torrent if appropriate.
2094 func (t *Torrent) initiateConn(peer PeerInfo) {
2095 if peer.Id == t.cl.peerID {
2098 if t.cl.badPeerAddr(peer.Addr) && !peer.Trusted {
2102 if t.addrActive(addr.String()) {
2106 t.halfOpen[addr.String()] = peer
2107 go t.cl.outgoingConnection(t, addr, peer.Source, peer.Trusted)
2110 // Adds a trusted, pending peer for each of the given Client's addresses. Typically used in tests to
2111 // quickly make one Client visible to the Torrent of another Client.
2112 func (t *Torrent) AddClientPeer(cl *Client) int {
2113 return t.AddPeers(func() (ps []PeerInfo) {
2114 for _, la := range cl.ListenAddrs() {
2115 ps = append(ps, PeerInfo{
2124 // All stats that include this Torrent. Useful when we want to increment ConnStats but not for every
2126 func (t *Torrent) allStats(f func(*ConnStats)) {
2131 func (t *Torrent) hashingPiece(i pieceIndex) bool {
2132 return t.pieces[i].hashing
2135 func (t *Torrent) pieceQueuedForHash(i pieceIndex) bool {
2136 return t.piecesQueuedForHash.Get(bitmap.BitIndex(i))
2139 func (t *Torrent) dialTimeout() time.Duration {
2140 return reducedDialTimeout(t.cl.config.MinDialTimeout, t.cl.config.NominalDialTimeout, t.cl.config.HalfOpenConnsPerTorrent, t.peers.Len())
2143 func (t *Torrent) piece(i int) *Piece {
2147 func (t *Torrent) onWriteChunkErr(err error) {
2148 if t.userOnWriteChunkErr != nil {
2149 go t.userOnWriteChunkErr(err)
2152 t.logger.WithDefaultLevel(log.Critical).Printf("default chunk write error handler: disabling data download")
2153 t.disallowDataDownloadLocked()
2156 func (t *Torrent) DisallowDataDownload() {
2157 t.disallowDataDownloadLocked()
2160 func (t *Torrent) disallowDataDownloadLocked() {
2161 t.dataDownloadDisallowed.Set()
2164 func (t *Torrent) AllowDataDownload() {
2165 t.dataDownloadDisallowed.Clear()
2168 // Enables uploading data, if it was disabled.
2169 func (t *Torrent) AllowDataUpload() {
2172 t.dataUploadDisallowed = false
2173 for c := range t.conns {
2178 // Disables uploading data, if it was enabled.
2179 func (t *Torrent) DisallowDataUpload() {
2182 t.dataUploadDisallowed = true
2183 for c := range t.conns {
2188 // Sets a handler that is called if there's an error writing a chunk to local storage. By default,
2189 // or if nil, a critical message is logged, and data download is disabled.
2190 func (t *Torrent) SetOnWriteChunkError(f func(error)) {
2193 t.userOnWriteChunkErr = f
2196 func (t *Torrent) iterPeers(f func(p *Peer)) {
2197 for pc := range t.conns {
2200 for _, ws := range t.webSeeds {
2205 func (t *Torrent) callbacks() *Callbacks {
2206 return &t.cl.config.Callbacks
2209 var WebseedHttpClient = &http.Client{
2210 Transport: &http.Transport{
2211 MaxConnsPerHost: 10,
2215 func (t *Torrent) addWebSeed(url string) {
2216 if t.cl.config.DisableWebseeds {
2219 if _, ok := t.webSeeds[url]; ok {
2222 const maxRequests = 10
2228 reconciledHandshakeStats: true,
2229 // TODO: Raise this limit, and instead limit concurrent fetches.
2230 PeerMaxRequests: 32,
2231 RemoteAddr: remoteAddrFromUrl(url),
2232 callbacks: t.callbacks(),
2234 client: webseed.Client{
2235 // Consider a MaxConnsPerHost in the transport for this, possibly in a global Client.
2236 HttpClient: WebseedHttpClient,
2239 activeRequests: make(map[Request]webseed.Request, maxRequests),
2241 ws.requesterCond.L = t.cl.locker()
2242 for i := 0; i < maxRequests; i += 1 {
2245 for _, f := range t.callbacks().NewPeer {
2248 ws.peer.logger = t.logger.WithContextValue(&ws)
2249 ws.peer.peerImpl = &ws
2251 ws.onGotInfo(t.info)
2253 t.webSeeds[url] = &ws.peer
2254 ws.peer.onPeerHasAllPieces()
2257 func (t *Torrent) peerIsActive(p *Peer) (active bool) {
2258 t.iterPeers(func(p1 *Peer) {
2266 func (t *Torrent) requestIndexToRequest(ri RequestIndex) Request {
2267 index := ri / t.chunksPerRegularPiece()
2270 t.piece(int(index)).chunkIndexSpec(pp.Integer(ri % t.chunksPerRegularPiece())),
2274 func (t *Torrent) requestIndexFromRequest(r Request) RequestIndex {
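// A RequestIndex flattens (piece, chunk) into a single number. Illustrative
// example: with 16KiB chunks and 256KiB pieces (16 chunks per regular
// piece), a request for piece 3 at chunk offset 32768 maps to
// 16*3 + 32768/16384 = 50, and requestIndexToRequest(50) maps back to
// piece 3, chunk 2.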
2275 return t.chunksPerRegularPiece()*uint32(r.Index) + uint32(r.Begin/t.chunkSize)
2278 func (t *Torrent) numChunks() RequestIndex {
2279 return RequestIndex((t.Length() + int64(t.chunkSize) - 1) / int64(t.chunkSize))