20 "github.com/anacrolix/torrent/common"
21 "github.com/anacrolix/torrent/segments"
22 "github.com/anacrolix/torrent/webseed"
23 "github.com/davecgh/go-spew/spew"
24 "github.com/pion/datachannel"
26 "github.com/anacrolix/dht/v2"
27 "github.com/anacrolix/log"
28 "github.com/anacrolix/missinggo"
29 "github.com/anacrolix/missinggo/iter"
30 "github.com/anacrolix/missinggo/perf"
31 "github.com/anacrolix/missinggo/pubsub"
32 "github.com/anacrolix/missinggo/slices"
33 "github.com/anacrolix/missinggo/v2/bitmap"
34 "github.com/anacrolix/missinggo/v2/prioritybitmap"
36 "github.com/anacrolix/torrent/bencode"
37 "github.com/anacrolix/torrent/metainfo"
38 pp "github.com/anacrolix/torrent/peer_protocol"
39 "github.com/anacrolix/torrent/storage"
40 "github.com/anacrolix/torrent/tracker"
41 "github.com/anacrolix/torrent/webtorrent"
44 // Maintains the state of a torrent within a Client. Many methods should not be called before the info is
45 // available, see .Info and .GotInfo.
47 // Torrent-level aggregate statistics. First in struct to ensure 64-bit
48 // alignment. See #262.
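// (An illustrative aside on why these aggregate counters come first, using
// hypothetical names: on 32-bit platforms Go only guarantees 8-byte alignment
// for the first word of an allocated struct, so a 64-bit counter placed after
// other fields can trip sync/atomic, e.g.
//
//	type counters struct {
//		flag bool  // pushes n to a 4-byte-aligned offset on 386/arm
//		n    int64
//	}
//
// where atomic.AddInt64(&c.n, 1) on such a value may panic with an unaligned
// 64-bit atomic operation. Keeping the stats field first avoids that.)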
53 networkingEnabled bool
54 dataDownloadDisallowed bool
55 dataUploadDisallowed bool
56 userOnWriteChunkErr func(error)
58 // Determines what chunks to request from peers.
59 requestStrategy requestStrategy
61 closed missinggo.Event
62 infoHash metainfo.Hash
64 // Values are the piece indices that changed.
65 pieceStateChanges *pubsub.PubSub
66 // The size of chunks to request from peers over the wire. This is
67 // normally 16KiB by convention these days.
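// (16KiB here is 1<<14 = 16384 bytes; the metadata pieces further down use the
// same 16KiB granularity, though the two conventions are independent.)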
70 // Total length of the torrent in bytes. Stored because it's not O(1) to
71 // get this from the info dict.
74 // The storage to open when the info dict becomes available.
75 storageOpener *storage.Client
76 // Storage for torrent data.
77 storage *storage.Torrent
78 // Read-locked for using storage, and write-locked for Closing.
79 storageLock sync.RWMutex
81 // TODO: Only announce stuff is used?
82 metainfo metainfo.MetaInfo
84 // The info dict. nil if we don't have it (yet).
86 fileIndex segments.Index
89 webSeeds map[string]*Peer
91 // Active peer connections, running message stream loops. TODO: Make this
92 // open (not-closed) connections only.
93 conns map[*PeerConn]struct{}
94 maxEstablishedConns int
95 // Set of addrs to which we're attempting to connect. Connections are
96 // half-open until all handshakes are completed.
97 halfOpen map[string]PeerInfo
100 // Reserve of peers to connect to. A peer can be both here and in the
101 // active connections if we're told about the peer after connecting with
102 // them. That encourages us to reconnect to peers that are well known in the swarm.
104 peers prioritizedPeers
105 // Whether we want to know more peers.
106 wantPeersEvent missinggo.Event
107 // An announcer for each tracker URL.
108 trackerAnnouncers map[string]torrentTrackerAnnouncer
109 // How many times we've initiated a DHT announce. TODO: Move into stats.
112 // Name used if the info name isn't available. Should be cleared when the
113 // Info does become available.
117 // The bencoded bytes of the info dict. This is actively manipulated if
118 // the info bytes aren't initially available, and we try to fetch them from peers.
121 // Each element corresponds to a 16KiB metadata piece. If true, we have
122 // received that piece.
123 metadataCompletedChunks []bool
124 metadataChanged sync.Cond
126 // Set when .Info is obtained.
127 gotMetainfo missinggo.Event
129 readers map[*reader]struct{}
130 _readerNowPieces bitmap.Bitmap
131 _readerReadaheadPieces bitmap.Bitmap
133 // A cache of pieces we need to get. Calculated from various piece and
134 // file priorities and completion states elsewhere.
135 _pendingPieces prioritybitmap.PriorityBitmap
136 // A cache of completed piece indices.
137 _completedPieces bitmap.Bitmap
138 // Pieces that need to be hashed.
139 piecesQueuedForHash bitmap.Bitmap
140 activePieceHashes int
142 // A pool of piece priorities []int for assignment to new connections.
143 // These "inclinations" are used to give connections preference for different pieces.
145 connPieceInclinationPool sync.Pool
147 // Count of each request across active connections.
148 pendingRequests map[Request]int
153 func (t *Torrent) numConns() int {
157 func (t *Torrent) numReaders() int {
158 return len(t.readers)
161 func (t *Torrent) readerNowPieces() bitmap.Bitmap {
162 return t._readerNowPieces
165 func (t *Torrent) readerReadaheadPieces() bitmap.Bitmap {
166 return t._readerReadaheadPieces
169 func (t *Torrent) ignorePieces() bitmap.Bitmap {
170 ret := t._completedPieces.Copy()
171 ret.Union(t.piecesQueuedForHash)
172 for i := 0; i < t.numPieces(); i++ {
173 if t.piece(i).hashing {
180 func (t *Torrent) pendingPieces() *prioritybitmap.PriorityBitmap {
181 return &t._pendingPieces
184 func (t *Torrent) tickleReaders() {
185 t.cl.event.Broadcast()
188 // Returns a channel that is closed when the Torrent is closed.
189 func (t *Torrent) Closed() <-chan struct{} {
190 return t.closed.LockedChan(t.cl.locker())
193 // KnownSwarm returns the known subset of the peers in the Torrent's swarm, including active,
194 // pending, and half-open peers.
195 func (t *Torrent) KnownSwarm() (ks []PeerInfo) {
196 // Add pending peers to the list
197 t.peers.Each(func(peer PeerInfo) {
198 ks = append(ks, peer)
201 // Add half-open peers to the list
202 for _, peer := range t.halfOpen {
203 ks = append(ks, peer)
206 // Add active peers to the list
207 for conn := range t.conns {
209 ks = append(ks, PeerInfo{
211 Addr: conn.RemoteAddr,
212 Source: conn.Discovery,
213 // > If the connection is encrypted, that's certainly enough to set SupportsEncryption.
214 // > But if we're not connected to them with an encrypted connection, I couldn't say
215 // > what's appropriate. We can carry forward the SupportsEncryption value as we
216 // > received it from trackers/DHT/PEX, or just use the encryption state for the
217 // > connection. It's probably easiest to do the latter for now.
218 // https://github.com/anacrolix/torrent/pull/188
219 SupportsEncryption: conn.headerEncrypted,
226 func (t *Torrent) setChunkSize(size pp.Integer) {
228 t.chunkPool = &sync.Pool{
229 New: func() interface{} {
230 b := make([]byte, size)
236 func (t *Torrent) pieceComplete(piece pieceIndex) bool {
237 return t._completedPieces.Get(bitmap.BitIndex(piece))
240 func (t *Torrent) pieceCompleteUncached(piece pieceIndex) storage.Completion {
241 return t.pieces[piece].Storage().Completion()
244 // There's a connection to that address already.
245 func (t *Torrent) addrActive(addr string) bool {
246 if _, ok := t.halfOpen[addr]; ok {
249 for c := range t.conns {
251 if ra.String() == addr {
258 func (t *Torrent) unclosedConnsAsSlice() (ret []*PeerConn) {
259 ret = make([]*PeerConn, 0, len(t.conns))
260 for c := range t.conns {
261 if !c.closed.IsSet() {
268 func (t *Torrent) addPeer(p PeerInfo) (added bool) {
270 torrent.Add(fmt.Sprintf("peers added by source %q", p.Source), 1)
271 if t.closed.IsSet() {
274 if ipAddr, ok := tryIpPortFromNetAddr(p.Addr); ok {
275 if cl.badPeerIPPort(ipAddr.IP, ipAddr.Port) {
276 torrent.Add("peers not added because of bad addr", 1)
277 // cl.logger.Printf("peers not added because of bad addr: %v", p)
281 if replaced, ok := t.peers.AddReturningReplacedPeer(p); ok {
282 torrent.Add("peers replaced", 1)
283 if !replaced.equal(p) {
284 t.logger.WithDefaultLevel(log.Debug).Printf("added %v replacing %v", p, replaced)
291 for t.peers.Len() > cl.config.TorrentPeersHighWater {
292 _, ok := t.peers.DeleteMin()
294 torrent.Add("excess reserve peers discarded", 1)
300 func (t *Torrent) invalidateMetadata() {
301 for i := range t.metadataCompletedChunks {
302 t.metadataCompletedChunks[i] = false
309 func (t *Torrent) saveMetadataPiece(index int, data []byte) {
313 if index >= len(t.metadataCompletedChunks) {
314 t.logger.Printf("%s: ignoring metadata piece %d", t, index)
317 copy(t.metadataBytes[(1<<14)*index:], data)
318 t.metadataCompletedChunks[index] = true
321 func (t *Torrent) metadataPieceCount() int {
322 return (len(t.metadataBytes) + (1 << 14) - 1) / (1 << 14)
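// For illustration, assuming a hypothetical 40,000-byte info dict: the ceiling
// division above gives (40000 + 16383) / 16384 = 3 metadata pieces, the last
// one only 7232 bytes.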
325 func (t *Torrent) haveMetadataPiece(piece int) bool {
327 return (1<<14)*piece < len(t.metadataBytes)
329 return piece < len(t.metadataCompletedChunks) && t.metadataCompletedChunks[piece]
333 func (t *Torrent) metadataSize() int {
334 return len(t.metadataBytes)
337 func infoPieceHashes(info *metainfo.Info) (ret [][]byte) {
338 for i := 0; i < len(info.Pieces); i += sha1.Size {
339 ret = append(ret, info.Pieces[i:i+sha1.Size])
344 func (t *Torrent) makePieces() {
345 hashes := infoPieceHashes(t.info)
346 t.pieces = make([]Piece, len(hashes))
347 for i, hash := range hashes {
348 piece := &t.pieces[i]
350 piece.index = pieceIndex(i)
351 piece.noPendingWrites.L = &piece.pendingWritesMutex
352 piece.hash = (*metainfo.Hash)(unsafe.Pointer(&hash[0]))
354 beginFile := pieceFirstFileIndex(piece.torrentBeginOffset(), files)
355 endFile := pieceEndFileIndex(piece.torrentEndOffset(), files)
356 piece.files = files[beginFile:endFile]
360 // Returns the index of the first file containing the piece. files must be
361 // ordered by offset.
362 func pieceFirstFileIndex(pieceOffset int64, files []*File) int {
363 for i, f := range files {
364 if f.offset+f.length > pieceOffset {
371 // Returns the index after the last file containing the piece. files must be
372 // ordered by offset.
373 func pieceEndFileIndex(pieceEndOffset int64, files []*File) int {
374 for i, f := range files {
375 if f.offset+f.length >= pieceEndOffset {
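// For illustration, with hypothetical files of 100 and 200 bytes: a piece
// covering torrent offsets [90, 150) gets pieceFirstFileIndex == 0 (file 0
// still ends after offset 90) and pieceEndFileIndex == 2 (file 1 is the last
// file whose end reaches offset 150), so piece.files would be files[0:2].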
382 func (t *Torrent) cacheLength() {
384 for _, f := range t.info.UpvertedFiles() {
390 // TODO: This shouldn't fail for storage reasons. Instead we should handle storage failure separately.
392 func (t *Torrent) setInfo(info *metainfo.Info) error {
393 if err := validateInfo(info); err != nil {
394 return fmt.Errorf("bad info: %s", err)
396 if t.storageOpener != nil {
398 t.storage, err = t.storageOpener.OpenTorrent(info, t.infoHash)
400 return fmt.Errorf("error opening torrent storage: %s", err)
406 t.fileIndex = segments.NewIndex(common.LengthIterFromUpvertedFiles(info.UpvertedFiles()))
407 t.displayName = "" // Save a few bytes lol.
414 // These seem to be all the follow-up tasks after the info is set that can't fail.
415 func (t *Torrent) onSetInfo() {
416 t.iterPeers(func(p *Peer) {
419 for i := range t.pieces {
420 t.updatePieceCompletion(pieceIndex(i))
422 if !p.storageCompletionOk {
423 // t.logger.Printf("piece %s completion unknown, queueing check", p)
424 t.queuePieceCheck(pieceIndex(i))
427 t.cl.event.Broadcast()
429 t.updateWantPeersEvent()
430 t.pendingRequests = make(map[Request]int)
431 t.tryCreateMorePieceHashers()
434 // Called when metadata for a torrent becomes available.
435 func (t *Torrent) setInfoBytes(b []byte) error {
436 if metainfo.HashBytes(b) != t.infoHash {
437 return errors.New("info bytes have wrong hash")
439 var info metainfo.Info
440 if err := bencode.Unmarshal(b, &info); err != nil {
441 return fmt.Errorf("error unmarshalling info bytes: %s", err)
444 t.metadataCompletedChunks = nil
448 if err := t.setInfo(&info); err != nil {
455 func (t *Torrent) haveAllMetadataPieces() bool {
459 if t.metadataCompletedChunks == nil {
462 for _, have := range t.metadataCompletedChunks {
470 // TODO: Propagate errors to disconnect peer.
471 func (t *Torrent) setMetadataSize(bytes int) (err error) {
473 // We already know the correct metadata size.
476 if bytes <= 0 || bytes > 10000000 { // 10MB, pulled from my ass.
477 return errors.New("bad size")
479 if t.metadataBytes != nil && len(t.metadataBytes) == int(bytes) {
482 t.metadataBytes = make([]byte, bytes)
483 t.metadataCompletedChunks = make([]bool, (bytes+(1<<14)-1)/(1<<14))
484 t.metadataChanged.Broadcast()
485 for c := range t.conns {
486 c.requestPendingMetadata()
491 // The current working name for the torrent. Either the name in the info dict,
492 // or a display name given, for example, by the dn value in a magnet link, or "".
493 func (t *Torrent) name() string {
495 defer t.nameMu.RUnlock()
499 if t.displayName != "" {
502 return "infohash:" + t.infoHash.HexString()
505 func (t *Torrent) pieceState(index pieceIndex) (ret PieceState) {
506 p := &t.pieces[index]
507 ret.Priority = t.piecePriority(index)
508 ret.Completion = p.completion()
509 ret.QueuedForHash = p.queuedForHash()
510 ret.Hashing = p.hashing
511 ret.Checking = ret.QueuedForHash || ret.Hashing
512 ret.Marking = p.marking
513 if !ret.Complete && t.piecePartiallyDownloaded(index) {
519 func (t *Torrent) metadataPieceSize(piece int) int {
520 return metadataPieceSize(len(t.metadataBytes), piece)
523 func (t *Torrent) newMetadataExtensionMessage(c *PeerConn, msgType int, piece int, data []byte) pp.Message {
529 d["total_size"] = len(t.metadataBytes)
531 p := bencode.MustMarshal(d)
534 ExtendedID: c.PeerExtensionIDs[pp.ExtensionNameMetadata],
535 ExtendedPayload: append(p, data...),
539 func (t *Torrent) pieceStateRuns() (ret PieceStateRuns) {
540 rle := missinggo.NewRunLengthEncoder(func(el interface{}, count uint64) {
541 ret = append(ret, PieceStateRun{
542 PieceState: el.(PieceState),
546 for index := range t.pieces {
547 rle.Append(t.pieceState(pieceIndex(index)), 1)
553 // Produces a small string representing a PieceStateRun.
554 func (psr PieceStateRun) String() (ret string) {
555 ret = fmt.Sprintf("%d", psr.Length)
556 ret += func() string {
557 switch psr.Priority {
558 case PiecePriorityNext:
560 case PiecePriorityNormal:
562 case PiecePriorityReadahead:
564 case PiecePriorityNow:
566 case PiecePriorityHigh:
575 if psr.QueuedForHash {
593 func (t *Torrent) writeStatus(w io.Writer) {
594 fmt.Fprintf(w, "Infohash: %s\n", t.infoHash.HexString())
595 fmt.Fprintf(w, "Metadata length: %d\n", t.metadataSize())
597 fmt.Fprintf(w, "Metadata have: ")
598 for _, h := range t.metadataCompletedChunks {
599 fmt.Fprintf(w, "%c", func() rune {
609 fmt.Fprintf(w, "Piece length: %s\n", func() string {
611 return fmt.Sprint(t.usualPieceSize())
617 fmt.Fprintf(w, "Num Pieces: %d (%d completed)\n", t.numPieces(), t.numPiecesCompleted())
618 fmt.Fprintf(w, "Piece States: %s", t.pieceStateRuns())
621 fmt.Fprintf(w, "Reader Pieces:")
622 t.forReaderOffsetPieces(func(begin, end pieceIndex) (again bool) {
623 fmt.Fprintf(w, " %d:%d", begin, end)
628 fmt.Fprintf(w, "Enabled trackers:\n")
630 tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
631 fmt.Fprintf(tw, " URL\tExtra\n")
632 for _, ta := range slices.Sort(slices.FromMapElems(t.trackerAnnouncers), func(l, r torrentTrackerAnnouncer) bool {
635 var luns, runs url.URL = *lu, *ru
638 var ml missinggo.MultiLess
639 ml.StrictNext(luns.String() == runs.String(), luns.String() < runs.String())
640 ml.StrictNext(lu.String() == ru.String(), lu.String() < ru.String())
642 }).([]torrentTrackerAnnouncer) {
643 fmt.Fprintf(tw, " %q\t%v\n", ta.URL(), ta.statusLine())
648 fmt.Fprintf(w, "DHT Announces: %d\n", t.numDHTAnnounces)
650 spew.NewDefaultConfig()
651 spew.Fdump(w, t.statsLocked())
653 peers := t.peersAsSlice()
654 sort.Slice(peers, func(i, j int) bool {
655 return worseConn(peers[i], peers[j])
657 for i, c := range peers {
658 fmt.Fprintf(w, "%2d. ", i+1)
663 func (t *Torrent) haveInfo() bool {
667 // Returns a run-time generated MetaInfo that includes the info bytes and
668 // announce-list as currently known to the client.
669 func (t *Torrent) newMetaInfo() metainfo.MetaInfo {
670 return metainfo.MetaInfo{
671 CreationDate: time.Now().Unix(),
672 Comment: "dynamic metainfo from client",
673 CreatedBy: "go.torrent",
674 AnnounceList: t.metainfo.UpvertedAnnounceList().Clone(),
675 InfoBytes: func() []byte {
677 return t.metadataBytes
682 UrlList: func() []string {
683 ret := make([]string, 0, len(t.webSeeds))
684 for url := range t.webSeeds {
685 ret = append(ret, url)
692 func (t *Torrent) BytesMissing() int64 {
695 return t.bytesMissingLocked()
698 func (t *Torrent) bytesMissingLocked() int64 {
702 func (t *Torrent) bytesLeft() (left int64) {
703 bitmap.Flip(t._completedPieces, 0, bitmap.BitIndex(t.numPieces())).IterTyped(func(piece int) bool {
704 p := &t.pieces[piece]
705 left += int64(p.length() - p.numDirtyBytes())
711 // Bytes left to give in tracker announces.
712 func (t *Torrent) bytesLeftAnnounce() int64 {
720 func (t *Torrent) piecePartiallyDownloaded(piece pieceIndex) bool {
721 if t.pieceComplete(piece) {
724 if t.pieceAllDirty(piece) {
727 return t.pieces[piece].hasDirtyChunks()
730 func (t *Torrent) usualPieceSize() int {
731 return int(t.info.PieceLength)
734 func (t *Torrent) numPieces() pieceIndex {
735 return pieceIndex(t.info.NumPieces())
738 func (t *Torrent) numPiecesCompleted() (num int) {
739 return t._completedPieces.Len()
742 func (t *Torrent) close() (err error) {
745 if t.storage != nil {
748 defer t.storageLock.Unlock()
752 t.iterPeers(func(p *Peer) {
756 t.cl.event.Broadcast()
757 t.pieceStateChanges.Close()
758 t.updateWantPeersEvent()
762 func (t *Torrent) requestOffset(r Request) int64 {
763 return torrentRequestOffset(*t.length, int64(t.usualPieceSize()), r)
766 // Return the request that would include the given offset into the torrent data. Returns !ok if
767 // there is no such request.
768 func (t *Torrent) offsetRequest(off int64) (req Request, ok bool) {
769 return torrentOffsetRequest(*t.length, t.info.PieceLength, int64(t.chunkSize), off)
772 func (t *Torrent) writeChunk(piece int, begin int64, data []byte) (err error) {
773 defer perf.ScopeTimerErr(&err)()
774 n, err := t.pieces[piece].Storage().WriteAt(data, begin)
775 if err == nil && n != len(data) {
776 err = io.ErrShortWrite
781 func (t *Torrent) bitfield() (bf []bool) {
782 bf = make([]bool, t.numPieces())
783 t._completedPieces.IterTyped(func(piece int) (again bool) {
790 func (t *Torrent) pieceNumChunks(piece pieceIndex) pp.Integer {
791 return (t.pieceLength(piece) + t.chunkSize - 1) / t.chunkSize
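// For illustration, with the conventional 16KiB chunk size: a full 256KiB
// piece gives (262144 + 16383) / 16384 = 16 chunks, while a short final piece
// of, say, 213568 bytes gives 14 chunks, the last one smaller than chunkSize.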
794 func (t *Torrent) pendAllChunkSpecs(pieceIndex pieceIndex) {
795 t.pieces[pieceIndex]._dirtyChunks.Clear()
798 func (t *Torrent) pieceLength(piece pieceIndex) pp.Integer {
799 if t.info.PieceLength == 0 {
800 // There will be no variance amongst pieces. Only pain.
803 if piece == t.numPieces()-1 {
804 ret := pp.Integer(*t.length % t.info.PieceLength)
809 return pp.Integer(t.info.PieceLength)
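// For illustration, with a hypothetical 1,000,000-byte torrent and a
// 262,144-byte piece length: there are 4 pieces, and the final one is
// 1000000 % 262144 = 213568 bytes; every other piece is the full 262,144 bytes.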
812 func (t *Torrent) hashPiece(piece pieceIndex) (ret metainfo.Hash, err error) {
814 p.waitNoPendingWrites()
815 storagePiece := t.pieces[piece].Storage()
817 // Does the backend want to do its own hashing?
818 if i, ok := storagePiece.PieceImpl.(storage.SelfHashing); ok {
819 var sum metainfo.Hash
820 //log.Printf("A piece decided to self-hash: %d", piece)
821 sum, err = i.SelfHash()
822 missinggo.CopyExact(&ret, sum)
826 hash := pieceHash.New()
827 const logPieceContents = false
828 if logPieceContents {
829 var examineBuf bytes.Buffer
830 _, err = storagePiece.WriteTo(io.MultiWriter(hash, &examineBuf))
831 log.Printf("hashed %q with copy err %v", examineBuf.Bytes(), err)
833 _, err = storagePiece.WriteTo(hash)
835 missinggo.CopyExact(&ret, hash.Sum(nil))
839 func (t *Torrent) haveAnyPieces() bool {
840 return t._completedPieces.Len() != 0
843 func (t *Torrent) haveAllPieces() bool {
847 return t._completedPieces.Len() == bitmap.BitIndex(t.numPieces())
850 func (t *Torrent) havePiece(index pieceIndex) bool {
851 return t.haveInfo() && t.pieceComplete(index)
854 func (t *Torrent) maybeDropMutuallyCompletePeer(
855 // I'm not sure about taking a peer here; not all peer implementations actually drop. Maybe that's okay?
858 if !t.cl.config.DropMutuallyCompletePeers {
861 if !t.haveAllPieces() {
864 if all, known := p.peerHasAllPieces(); !(known && all) {
870 t.logger.WithDefaultLevel(log.Debug).Printf("dropping %v, which is mutually complete", p)
874 func (t *Torrent) haveChunk(r Request) (ret bool) {
876 // log.Println("have chunk", r, ret)
881 if t.pieceComplete(pieceIndex(r.Index)) {
884 p := &t.pieces[r.Index]
885 return !p.pendingChunk(r.ChunkSpec, t.chunkSize)
888 func chunkIndex(cs ChunkSpec, chunkSize pp.Integer) int {
889 return int(cs.Begin / chunkSize)
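// For illustration, with the conventional 16KiB chunk size: a chunk beginning
// at byte 32768 within its piece has chunkIndex 32768 / 16384 = 2.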
892 func (t *Torrent) wantPieceIndex(index pieceIndex) bool {
896 if index < 0 || index >= t.numPieces() {
899 p := &t.pieces[index]
900 if p.queuedForHash() {
906 if t.pieceComplete(index) {
909 if t._pendingPieces.Contains(bitmap.BitIndex(index)) {
912 // t.logger.Printf("piece %d not pending", index)
913 return !t.forReaderOffsetPieces(func(begin, end pieceIndex) bool {
914 return index < begin || index >= end
918 // The worst connection is one that hasn't been sent, or hasn't sent, anything useful for the longest. A bad
919 // connection is one that usually sends us unwanted pieces, or has been in the worse half of the
920 // established connections for more than a minute.
921 func (t *Torrent) worstBadConn() *PeerConn {
922 wcs := worseConnSlice{t.unclosedConnsAsSlice()}
925 c := heap.Pop(&wcs).(*PeerConn)
926 if c._stats.ChunksReadWasted.Int64() >= 6 && c._stats.ChunksReadWasted.Int64() > c._stats.ChunksReadUseful.Int64() {
929 // If the connection is in the worst half of the established
930 // connection quota and is older than a minute.
931 if wcs.Len() >= (t.maxEstablishedConns+1)/2 {
932 // Give connections 1 minute to prove themselves.
933 if time.Since(c.completedHandshake) > time.Minute {
941 type PieceStateChange struct {
946 func (t *Torrent) publishPieceChange(piece pieceIndex) {
947 t.cl._mu.Defer(func() {
948 cur := t.pieceState(piece)
949 p := &t.pieces[piece]
950 if cur != p.publicPieceState {
951 p.publicPieceState = cur
952 t.pieceStateChanges.Publish(PieceStateChange{
960 func (t *Torrent) pieceNumPendingChunks(piece pieceIndex) pp.Integer {
961 if t.pieceComplete(piece) {
964 return t.pieceNumChunks(piece) - t.pieces[piece].numDirtyChunks()
967 func (t *Torrent) pieceAllDirty(piece pieceIndex) bool {
968 return t.pieces[piece]._dirtyChunks.Len() == int(t.pieceNumChunks(piece))
971 func (t *Torrent) readersChanged() {
972 t.updateReaderPieces()
973 t.updateAllPiecePriorities()
976 func (t *Torrent) updateReaderPieces() {
977 t._readerNowPieces, t._readerReadaheadPieces = t.readerPiecePriorities()
980 func (t *Torrent) readerPosChanged(from, to pieceRange) {
984 t.updateReaderPieces()
985 // Order the ranges, high and low.
987 if l.begin > h.begin {
991 // Two distinct ranges.
992 t.updatePiecePriorities(l.begin, l.end)
993 t.updatePiecePriorities(h.begin, h.end)
1000 t.updatePiecePriorities(l.begin, end)
1004 func (t *Torrent) maybeNewConns() {
1005 // Tickle the accept routine.
1006 t.cl.event.Broadcast()
1010 func (t *Torrent) piecePriorityChanged(piece pieceIndex) {
1011 // t.logger.Printf("piece %d priority changed", piece)
1012 t.iterPeers(func(c *Peer) {
1013 if c.updatePiecePriority(piece) {
1014 // log.Print("conn piece priority changed")
1019 t.publishPieceChange(piece)
1022 func (t *Torrent) updatePiecePriority(piece pieceIndex) {
1023 p := &t.pieces[piece]
1024 newPrio := p.uncachedPriority()
1025 // t.logger.Printf("torrent %p: piece %d: uncached priority: %v", t, piece, newPrio)
1026 if newPrio == PiecePriorityNone {
1027 if !t._pendingPieces.Remove(bitmap.BitIndex(piece)) {
1031 if !t._pendingPieces.Set(bitmap.BitIndex(piece), newPrio.BitmapPriority()) {
1035 t.piecePriorityChanged(piece)
1038 func (t *Torrent) updateAllPiecePriorities() {
1039 t.updatePiecePriorities(0, t.numPieces())
1042 // Update all piece priorities in one hit. This function should have the same
1043 // output as updatePiecePriority, but across all pieces.
1044 func (t *Torrent) updatePiecePriorities(begin, end pieceIndex) {
1045 for i := begin; i < end; i++ {
1046 t.updatePiecePriority(i)
1050 // Returns the range of pieces [begin, end) that contains the extent of bytes.
1051 func (t *Torrent) byteRegionPieces(off, size int64) (begin, end pieceIndex) {
1052 if off >= *t.length {
1062 begin = pieceIndex(off / t.info.PieceLength)
1063 end = pieceIndex((off + size + t.info.PieceLength - 1) / t.info.PieceLength)
1064 if end > pieceIndex(t.info.NumPieces()) {
1065 end = pieceIndex(t.info.NumPieces())
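// For illustration, with hypothetical values off = 300000, size = 100000 and a
// 262,144-byte piece length: begin = 300000 / 262144 = 1 and
// end = (400000 + 262143) / 262144 = 2, so the extent lies entirely in piece 1.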
1070 // Returns true if all iterations complete without breaking. Calls f with the read
1071 // regions for all readers. The reader regions should not be merged as some
1072 // callers depend on this method to enumerate readers.
1073 func (t *Torrent) forReaderOffsetPieces(f func(begin, end pieceIndex) (more bool)) (all bool) {
1074 for r := range t.readers {
1076 if p.begin >= p.end {
1079 if !f(p.begin, p.end) {
1086 func (t *Torrent) piecePriority(piece pieceIndex) piecePriority {
1087 prio, ok := t._pendingPieces.GetPriority(bitmap.BitIndex(piece))
1089 return PiecePriorityNone
1094 ret := piecePriority(-prio)
1095 if ret == PiecePriorityNone {
1101 func (t *Torrent) pendRequest(req Request) {
1102 ci := chunkIndex(req.ChunkSpec, t.chunkSize)
1103 t.pieces[req.Index].pendChunkIndex(ci)
1106 func (t *Torrent) pieceCompletionChanged(piece pieceIndex) {
1108 t.cl.event.Broadcast()
1109 if t.pieceComplete(piece) {
1110 t.onPieceCompleted(piece)
1112 t.onIncompletePiece(piece)
1114 t.updatePiecePriority(piece)
1117 func (t *Torrent) numReceivedConns() (ret int) {
1118 for c := range t.conns {
1119 if c.Discovery == PeerSourceIncoming {
1126 func (t *Torrent) maxHalfOpen() int {
1127 // Note that if we somehow exceed the maximum established conns, we want
1128 // the negative value to have an effect.
1129 establishedHeadroom := int64(t.maxEstablishedConns - len(t.conns))
1130 extraIncoming := int64(t.numReceivedConns() - t.maxEstablishedConns/2)
1131 // We want to allow some experimentation with new peers, and to try to
1132 // upset an oversupply of received connections.
1133 return int(min(max(5, extraIncoming)+establishedHeadroom, int64(t.cl.config.HalfOpenConnsPerTorrent)))
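// For illustration, with hypothetical values maxEstablishedConns = 50, 40 open
// conns of which 10 were incoming, and HalfOpenConnsPerTorrent = 25:
// establishedHeadroom = 10, extraIncoming = 10 - 25 = -15, and the result is
// min(max(5, -15)+10, 25) = 15 half-open slots.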
1136 func (t *Torrent) openNewConns() (initiated int) {
1137 defer t.updateWantPeersEvent()
1138 for t.peers.Len() != 0 {
1142 if len(t.halfOpen) >= t.maxHalfOpen() {
1145 if len(t.cl.dialers) == 0 {
1148 if t.cl.numHalfOpen >= t.cl.config.TotalHalfOpenConns {
1151 p := t.peers.PopMax()
1158 func (t *Torrent) getConnPieceInclination() []int {
1159 _ret := t.connPieceInclinationPool.Get()
1161 pieceInclinationsNew.Add(1)
1162 return rand.Perm(int(t.numPieces()))
1164 pieceInclinationsReused.Add(1)
1165 return *_ret.(*[]int)
1168 func (t *Torrent) putPieceInclination(pi []int) {
1169 t.connPieceInclinationPool.Put(&pi)
1170 pieceInclinationsPut.Add(1)
1173 func (t *Torrent) updatePieceCompletion(piece pieceIndex) bool {
1175 uncached := t.pieceCompleteUncached(piece)
1176 cached := p.completion()
1177 changed := cached != uncached
1178 complete := uncached.Complete
1179 p.storageCompletionOk = uncached.Ok
1180 t._completedPieces.Set(bitmap.BitIndex(piece), complete)
1181 if complete && len(p.dirtiers) != 0 {
1182 t.logger.Printf("marked piece %v complete but still has dirtiers", piece)
1185 log.Fstr("piece %d completion changed: %+v -> %+v", piece, cached, uncached).SetLevel(log.Debug).Log(t.logger)
1186 t.pieceCompletionChanged(piece)
1191 // Non-blocking read. Client lock is not required.
1192 func (t *Torrent) readAt(b []byte, off int64) (n int, err error) {
1194 p := &t.pieces[off/t.info.PieceLength]
1195 p.waitNoPendingWrites()
1197 n1, err = p.Storage().ReadAt(b, off-p.Info().Offset())
1208 // Returns an error if the metadata was completed, but couldn't be set for some reason. Blame it on
1209 // the last peer to contribute. TODO: Actually we shouldn't blame peers for failure to open storage
1210 // etc. Also we should probably cache metadata pieces per-Peer, to isolate failure appropriately.
1211 func (t *Torrent) maybeCompleteMetadata() error {
1216 if !t.haveAllMetadataPieces() {
1217 // Don't have enough metadata pieces.
1220 err := t.setInfoBytes(t.metadataBytes)
1222 t.invalidateMetadata()
1223 return fmt.Errorf("error setting info bytes: %s", err)
1225 if t.cl.config.Debug {
1226 t.logger.Printf("%s: got metadata from peers", t)
1231 func (t *Torrent) readerPiecePriorities() (now, readahead bitmap.Bitmap) {
1232 t.forReaderOffsetPieces(func(begin, end pieceIndex) bool {
1234 now.Add(bitmap.BitIndex(begin))
1235 readahead.AddRange(bitmap.BitIndex(begin)+1, bitmap.BitIndex(end))
1242 func (t *Torrent) needData() bool {
1243 if t.closed.IsSet() {
1249 return t._pendingPieces.Len() != 0
1252 func appendMissingStrings(old, new []string) (ret []string) {
1255 for _, n := range new {
1256 for _, o := range old {
1261 ret = append(ret, n)
1266 func appendMissingTrackerTiers(existing [][]string, minNumTiers int) (ret [][]string) {
1268 for minNumTiers > len(ret) {
1269 ret = append(ret, nil)
1274 func (t *Torrent) addTrackers(announceList [][]string) {
1275 fullAnnounceList := &t.metainfo.AnnounceList
1276 t.metainfo.AnnounceList = appendMissingTrackerTiers(*fullAnnounceList, len(announceList))
1277 for tierIndex, trackerURLs := range announceList {
1278 (*fullAnnounceList)[tierIndex] = appendMissingStrings((*fullAnnounceList)[tierIndex], trackerURLs)
1280 t.startMissingTrackerScrapers()
1281 t.updateWantPeersEvent()
1284 // Don't call this before the info is available.
1285 func (t *Torrent) bytesCompleted() int64 {
1289 return t.info.TotalLength() - t.bytesLeft()
1292 func (t *Torrent) SetInfoBytes(b []byte) (err error) {
1295 return t.setInfoBytes(b)
1298 // Returns true if the connection is removed from the torrent's conns.
1299 func (t *Torrent) deletePeerConn(c *PeerConn) (ret bool) {
1300 if !c.closed.IsSet() {
1301 panic("connection is not closed")
1302 // There are behaviours prevented by the closed state that will fail
1303 // if the connection has been deleted.
1307 // Avoid adding a drop event more than once. Probably we should track whether we've generated
1308 // the drop event against the PexConnState instead.
1310 if !t.cl.config.DisablePEX {
1314 torrent.Add("deleted connections", 1)
1315 c.deleteAllRequests()
1316 if t.numActivePeers() == 0 {
1317 t.assertNoPendingRequests()
1322 func (t *Torrent) numActivePeers() (num int) {
1323 t.iterPeers(func(*Peer) {
1329 func (t *Torrent) assertNoPendingRequests() {
1330 if len(t.pendingRequests) != 0 {
1331 panic(t.pendingRequests)
1333 //if len(t.lastRequested) != 0 {
1334 // panic(t.lastRequested)
1338 func (t *Torrent) dropConnection(c *PeerConn) {
1339 t.cl.event.Broadcast()
1341 if t.deletePeerConn(c) {
1346 func (t *Torrent) wantPeers() bool {
1347 if t.closed.IsSet() {
1350 if t.peers.Len() > t.cl.config.TorrentPeersLowWater {
1353 return t.needData() || t.seeding()
1356 func (t *Torrent) updateWantPeersEvent() {
1358 t.wantPeersEvent.Set()
1360 t.wantPeersEvent.Clear()
1364 // Returns whether the client should make effort to seed the torrent.
1365 func (t *Torrent) seeding() bool {
1367 if t.closed.IsSet() {
1370 if t.dataUploadDisallowed {
1373 if cl.config.NoUpload {
1376 if !cl.config.Seed {
1379 if cl.config.DisableAggressiveUpload && t.needData() {
1385 func (t *Torrent) onWebRtcConn(
1386 c datachannel.ReadWriteCloser,
1387 dcc webtorrent.DataChannelContext,
1390 pc, err := t.cl.initiateProtocolHandshakes(
1391 context.Background(),
1392 webrtcNetConn{c, dcc},
1396 webrtcNetAddr{dcc.Remote},
1398 fmt.Sprintf("webrtc offer_id %x", dcc.OfferId),
1401 t.logger.WithDefaultLevel(log.Error).Printf("error in handshaking webrtc connection: %v", err)
1404 if dcc.LocalOffered {
1405 pc.Discovery = PeerSourceTracker
1407 pc.Discovery = PeerSourceIncoming
1411 err = t.cl.runHandshookConn(pc, t)
1413 t.logger.WithDefaultLevel(log.Critical).Printf("error running handshook webrtc conn: %v", err)
1417 func (t *Torrent) logRunHandshookConn(pc *PeerConn, logAll bool, level log.Level) {
1418 err := t.cl.runHandshookConn(pc, t)
1419 if err != nil || logAll {
1420 t.logger.WithDefaultLevel(level).Printf("error running handshook conn: %v", err)
1424 func (t *Torrent) runHandshookConnLoggingErr(pc *PeerConn) {
1425 t.logRunHandshookConn(pc, false, log.Debug)
1428 func (t *Torrent) startWebsocketAnnouncer(u url.URL) torrentTrackerAnnouncer {
1429 wtc, release := t.cl.websocketTrackers.Get(u.String())
1431 <-t.closed.LockedChan(t.cl.locker())
1434 wst := websocketTrackerStatus{u, wtc}
1436 err := wtc.Announce(tracker.Started, t.infoHash)
1438 t.logger.WithDefaultLevel(log.Warning).Printf(
1439 "error in initial announce to %q: %v",
1448 func (t *Torrent) startScrapingTracker(_url string) {
1452 u, err := url.Parse(_url)
1454 // URLs with a leading '*' appear to be a uTorrent convention to
1455 // disable trackers.
1457 log.Str("error parsing tracker url").AddValues("url", _url).Log(t.logger)
1461 if u.Scheme == "udp" {
1463 t.startScrapingTracker(u.String())
1465 t.startScrapingTracker(u.String())
1468 if _, ok := t.trackerAnnouncers[_url]; ok {
1471 sl := func() torrentTrackerAnnouncer {
1474 if t.cl.config.DisableWebtorrent {
1477 return t.startWebsocketAnnouncer(*u)
1479 if t.cl.config.DisableIPv4Peers || t.cl.config.DisableIPv4 {
1483 if t.cl.config.DisableIPv6 {
1487 newAnnouncer := &trackerScraper{
1491 go newAnnouncer.Run()
1497 if t.trackerAnnouncers == nil {
1498 t.trackerAnnouncers = make(map[string]torrentTrackerAnnouncer)
1500 t.trackerAnnouncers[_url] = sl
1503 // Adds and starts tracker scrapers for tracker URLs that aren't already running.
1505 func (t *Torrent) startMissingTrackerScrapers() {
1506 if t.cl.config.DisableTrackers {
1509 t.startScrapingTracker(t.metainfo.Announce)
1510 for _, tier := range t.metainfo.AnnounceList {
1511 for _, url := range tier {
1512 t.startScrapingTracker(url)
1517 // Returns an AnnounceRequest with fields filled out to defaults and current values.
1519 func (t *Torrent) announceRequest(event tracker.AnnounceEvent) tracker.AnnounceRequest {
1520 // Note that IPAddress is not set. It's set for UDP inside the tracker code, since it's
1521 // dependent on the network in use.
1522 return tracker.AnnounceRequest{
1524 NumWant: func() int32 {
1525 if t.wantPeers() && len(t.cl.dialers) > 0 {
1531 Port: uint16(t.cl.incomingPeerPort()),
1532 PeerId: t.cl.peerID,
1533 InfoHash: t.infoHash,
1534 Key: t.cl.announceKey(),
1536 // The following are vaguely described in BEP 3.
1538 Left: t.bytesLeftAnnounce(),
1539 Uploaded: t.stats.BytesWrittenData.Int64(),
1540 // There's no mention of wasted or unwanted download in the BEP.
1541 Downloaded: t.stats.BytesReadUsefulData.Int64(),
1545 // Adds peers revealed in an announce until the announce ends, or we have enough peers.
1547 func (t *Torrent) consumeDhtAnnouncePeers(pvs <-chan dht.PeersValues) {
1549 for v := range pvs {
1552 for _, cp := range v.Peers {
1554 // Can't do anything with this.
1557 if t.addPeer(PeerInfo{
1558 Addr: ipPortAddr{cp.IP, cp.Port},
1559 Source: PeerSourceDhtGetPeers,
1566 //log.Printf("added %v peers from dht for %v", added, t.InfoHash().HexString())
1571 func (t *Torrent) announceToDht(impliedPort bool, s DhtServer) error {
1572 ps, err := s.Announce(t.infoHash, t.cl.incomingPeerPort(), impliedPort)
1576 go t.consumeDhtAnnouncePeers(ps.Peers())
1578 case <-t.closed.LockedChan(t.cl.locker()):
1579 case <-time.After(5 * time.Minute):
1585 func (t *Torrent) dhtAnnouncer(s DhtServer) {
1591 if t.closed.IsSet() {
1597 // TODO: Determine if there's a listener on the port we're announcing.
1598 if len(cl.dialers) == 0 && len(cl.listeners) == 0 {
1609 err := t.announceToDht(true, s)
1611 t.logger.WithDefaultLevel(log.Warning).Printf("error announcing %q to DHT: %s", t, err)
1617 func (t *Torrent) addPeers(peers []PeerInfo) (added int) {
1618 for _, p := range peers {
1626 // The returned TorrentStats may require alignment in memory. See
1627 // https://github.com/anacrolix/torrent/issues/383.
1628 func (t *Torrent) Stats() TorrentStats {
1630 defer t.cl.rUnlock()
1631 return t.statsLocked()
1634 func (t *Torrent) statsLocked() (ret TorrentStats) {
1635 ret.ActivePeers = len(t.conns)
1636 ret.HalfOpenPeers = len(t.halfOpen)
1637 ret.PendingPeers = t.peers.Len()
1638 ret.TotalPeers = t.numTotalPeers()
1639 ret.ConnectedSeeders = 0
1640 for c := range t.conns {
1641 if all, ok := c.peerHasAllPieces(); all && ok {
1642 ret.ConnectedSeeders++
1645 ret.ConnStats = t.stats.Copy()
1649 // The total number of peers in the torrent.
1650 func (t *Torrent) numTotalPeers() int {
1651 peers := make(map[string]struct{})
1652 for conn := range t.conns {
1653 ra := conn.conn.RemoteAddr()
1655 // It's been closed and doesn't support RemoteAddr.
1658 peers[ra.String()] = struct{}{}
1660 for addr := range t.halfOpen {
1661 peers[addr] = struct{}{}
1663 t.peers.Each(func(peer PeerInfo) {
1664 peers[peer.Addr.String()] = struct{}{}
1669 // Reconcile bytes transferred before connection was associated with a torrent.
1671 func (t *Torrent) reconcileHandshakeStats(c *PeerConn) {
1672 if c._stats != (ConnStats{
1673 // Handshakes should only increment these fields:
1674 BytesWritten: c._stats.BytesWritten,
1675 BytesRead: c._stats.BytesRead,
1679 c.postHandshakeStats(func(cs *ConnStats) {
1680 cs.BytesRead.Add(c._stats.BytesRead.Int64())
1681 cs.BytesWritten.Add(c._stats.BytesWritten.Int64())
1683 c.reconciledHandshakeStats = true
1686 // Adds the connection, or returns an error explaining why it wasn't added.
1687 func (t *Torrent) addPeerConn(c *PeerConn) (err error) {
1690 torrent.Add("added connections", 1)
1693 if t.closed.IsSet() {
1694 return errors.New("torrent closed")
1696 for c0 := range t.conns {
1697 if c.PeerID != c0.PeerID {
1700 if !t.cl.config.DropDuplicatePeerIds {
1703 if left, ok := c.hasPreferredNetworkOver(c0); ok && left {
1705 t.deletePeerConn(c0)
1707 return errors.New("existing connection preferred")
1710 if len(t.conns) >= t.maxEstablishedConns {
1711 c := t.worstBadConn()
1713 return errors.New("don't want conns")
1718 if len(t.conns) >= t.maxEstablishedConns {
1721 t.conns[c] = struct{}{}
1722 if !t.cl.config.DisablePEX && !c.PeerExtensionBytes.SupportsExtended() {
1723 t.pex.Add(c) // as no further extended handshake expected
1728 func (t *Torrent) wantConns() bool {
1729 if !t.networkingEnabled {
1732 if t.closed.IsSet() {
1735 if !t.seeding() && !t.needData() {
1738 if len(t.conns) < t.maxEstablishedConns {
1741 return t.worstBadConn() != nil
1744 func (t *Torrent) SetMaxEstablishedConns(max int) (oldMax int) {
1747 oldMax = t.maxEstablishedConns
1748 t.maxEstablishedConns = max
1749 wcs := slices.HeapInterface(slices.FromMapKeys(t.conns), func(l, r *PeerConn) bool {
1750 return worseConn(&l.Peer, &r.Peer)
1752 for len(t.conns) > t.maxEstablishedConns && wcs.Len() > 0 {
1753 t.dropConnection(wcs.Pop().(*PeerConn))
1759 func (t *Torrent) pieceHashed(piece pieceIndex, passed bool, hashIoErr error) {
1760 t.logger.Log(log.Fstr("hashed piece %d (passed=%t)", piece, passed).SetLevel(log.Debug))
1763 t.cl.event.Broadcast()
1764 if t.closed.IsSet() {
1768 // Don't score the first time a piece is hashed, it could be an initial check.
1769 if p.storageCompletionOk {
1771 pieceHashedCorrect.Add(1)
1773 log.Fmsg("piece %d failed hash: %d connections contributed", piece, len(p.dirtiers)).AddValues(t, p).Log(t.logger)
1774 pieceHashedNotCorrect.Add(1)
1779 t.publishPieceChange(piece)
1782 t.publishPieceChange(piece)
1786 if len(p.dirtiers) != 0 {
1787 // Don't increment stats above connection-level for every involved connection.
1788 t.allStats((*ConnStats).incrementPiecesDirtiedGood)
1790 for c := range p.dirtiers {
1791 c._stats.incrementPiecesDirtiedGood()
1793 t.clearPieceTouchers(piece)
1795 err := p.Storage().MarkComplete()
1797 t.logger.Printf("%T: error marking piece complete %d: %s", t.storage, piece, err)
1801 if t.closed.IsSet() {
1804 t.pendAllChunkSpecs(piece)
1806 if len(p.dirtiers) != 0 && p.allChunksDirty() && hashIoErr == nil {
1807 // Peers contributed to all the data for this piece hash failure, and the failure was
1808 // not due to errors in the storage (such as data being dropped in a cache).
1810 // Increment Torrent and above stats, and then specific connections.
1811 t.allStats((*ConnStats).incrementPiecesDirtiedBad)
1812 for c := range p.dirtiers {
1813 // Y u do dis peer?!
1814 c.stats().incrementPiecesDirtiedBad()
1817 bannableTouchers := make([]*Peer, 0, len(p.dirtiers))
1818 for c := range p.dirtiers {
1820 bannableTouchers = append(bannableTouchers, c)
1823 t.clearPieceTouchers(piece)
1824 slices.Sort(bannableTouchers, connLessTrusted)
1826 if t.cl.config.Debug {
1828 "bannable conns by trust for piece %d: %v",
1830 func() (ret []connectionTrust) {
1831 for _, c := range bannableTouchers {
1832 ret = append(ret, c.trust())
1839 if len(bannableTouchers) >= 1 {
1840 c := bannableTouchers[0]
1841 t.cl.banPeerIP(c.remoteIp())
1845 t.onIncompletePiece(piece)
1846 p.Storage().MarkNotComplete()
1848 t.updatePieceCompletion(piece)
1851 func (t *Torrent) cancelRequestsForPiece(piece pieceIndex) {
1852 // TODO: Make faster
1853 for cn := range t.conns {
1858 func (t *Torrent) onPieceCompleted(piece pieceIndex) {
1859 t.pendAllChunkSpecs(piece)
1860 t.cancelRequestsForPiece(piece)
1861 for conn := range t.conns {
1863 t.maybeDropMutuallyCompletePeer(&conn.Peer)
1867 // Called when a piece is found to be not complete.
1868 func (t *Torrent) onIncompletePiece(piece pieceIndex) {
1869 if t.pieceAllDirty(piece) {
1870 t.pendAllChunkSpecs(piece)
1872 if !t.wantPieceIndex(piece) {
1873 // t.logger.Printf("piece %d incomplete and unwanted", piece)
1876 // We could drop any connections that we've told we have a piece that we
1877 // don't actually have. But there's a test failure, and it seems clients don't care
1878 // if you request pieces that you already claim to have. Pruning bad
1879 // connections might just remove any connections that aren't treating us
1880 // favourably anyway.
1882 // for c := range t.conns {
1883 // if c.sentHave(piece) {
1887 t.iterPeers(func(conn *Peer) {
1888 if conn.peerHasPiece(piece) {
1889 conn.updateRequests()
1894 func (t *Torrent) tryCreateMorePieceHashers() {
1895 for !t.closed.IsSet() && t.activePieceHashes < 2 && t.tryCreatePieceHasher() {
1899 func (t *Torrent) tryCreatePieceHasher() bool {
1900 if t.storage == nil {
1903 pi, ok := t.getPieceToHash()
1908 t.piecesQueuedForHash.Remove(pi)
1910 t.publishPieceChange(pi)
1911 t.updatePiecePriority(pi)
1912 t.storageLock.RLock()
1913 t.activePieceHashes++
1914 go t.pieceHasher(pi)
1918 func (t *Torrent) getPieceToHash() (ret pieceIndex, ok bool) {
1919 t.piecesQueuedForHash.IterTyped(func(i pieceIndex) bool {
1920 if t.piece(i).hashing {
1930 func (t *Torrent) pieceHasher(index pieceIndex) {
1932 sum, copyErr := t.hashPiece(index)
1933 correct := sum == *p.hash
1937 log.Fmsg("piece %v (%s) hash failure copy error: %v", p, p.hash.HexString(), copyErr).Log(t.logger)
1939 t.storageLock.RUnlock()
1943 t.updatePiecePriority(index)
1944 t.pieceHashed(index, correct, copyErr)
1945 t.publishPieceChange(index)
1946 t.activePieceHashes--
1947 t.tryCreateMorePieceHashers()
1950 // Clear the record of which connections touched a piece, on both the piece and the connections.
1951 func (t *Torrent) clearPieceTouchers(pi pieceIndex) {
1953 for c := range p.dirtiers {
1954 delete(c.peerTouchedPieces, pi)
1955 delete(p.dirtiers, c)
1959 func (t *Torrent) peersAsSlice() (ret []*Peer) {
1960 t.iterPeers(func(p *Peer) {
1961 ret = append(ret, p)
1966 func (t *Torrent) queuePieceCheck(pieceIndex pieceIndex) {
1967 piece := t.piece(pieceIndex)
1968 if piece.queuedForHash() {
1971 t.piecesQueuedForHash.Add(bitmap.BitIndex(pieceIndex))
1972 t.publishPieceChange(pieceIndex)
1973 t.updatePiecePriority(pieceIndex)
1974 t.tryCreateMorePieceHashers()
1977 // Forces all the pieces to be re-hashed. See also Piece.VerifyData. This should not be called
1978 // before the Info is available.
1979 func (t *Torrent) VerifyData() {
1980 for i := pieceIndex(0); i < t.NumPieces(); i++ {
1981 t.Piece(i).VerifyData()
1985 // Start the process of connecting to the given peer for the given torrent if appropriate.
1986 func (t *Torrent) initiateConn(peer PeerInfo) {
1987 if peer.Id == t.cl.peerID {
1990 if t.cl.badPeerAddr(peer.Addr) && !peer.Trusted {
1994 if t.addrActive(addr.String()) {
1998 t.halfOpen[addr.String()] = peer
1999 go t.cl.outgoingConnection(t, addr, peer.Source, peer.Trusted)
2002 // Adds a trusted, pending peer for each of the given Client's addresses. Typically used in tests to
2003 // quickly make one Client visible to the Torrent of another Client.
2004 func (t *Torrent) AddClientPeer(cl *Client) int {
2005 return t.AddPeers(func() (ps []PeerInfo) {
2006 for _, la := range cl.ListenAddrs() {
2007 ps = append(ps, PeerInfo{
2016 // All stats that include this Torrent. Useful when we want to increment ConnStats but not for every connection.
2018 func (t *Torrent) allStats(f func(*ConnStats)) {
2023 func (t *Torrent) hashingPiece(i pieceIndex) bool {
2024 return t.pieces[i].hashing
2027 func (t *Torrent) pieceQueuedForHash(i pieceIndex) bool {
2028 return t.piecesQueuedForHash.Get(bitmap.BitIndex(i))
2031 func (t *Torrent) dialTimeout() time.Duration {
2032 return reducedDialTimeout(t.cl.config.MinDialTimeout, t.cl.config.NominalDialTimeout, t.cl.config.HalfOpenConnsPerTorrent, t.peers.Len())
2035 func (t *Torrent) piece(i int) *Piece {
2039 func (t *Torrent) requestStrategyTorrent() requestStrategyTorrent {
2043 type torrentRequestStrategyCallbacks struct {
2047 func (cb torrentRequestStrategyCallbacks) requestTimedOut(r Request) {
2048 torrent.Add("Request timeouts", 1)
2050 defer cb.t.cl.unlock()
2051 cb.t.iterPeers(func(cn *Peer) {
2052 if cn.peerHasPiece(pieceIndex(r.Index)) {
2059 func (t *Torrent) requestStrategyCallbacks() requestStrategyCallbacks {
2060 return torrentRequestStrategyCallbacks{t}
2063 func (t *Torrent) onWriteChunkErr(err error) {
2064 if t.userOnWriteChunkErr != nil {
2065 go t.userOnWriteChunkErr(err)
2068 t.logger.WithDefaultLevel(log.Critical).Printf("default chunk write error handler: disabling data download")
2069 t.disallowDataDownloadLocked()
2072 func (t *Torrent) DisallowDataDownload() {
2075 t.disallowDataDownloadLocked()
2078 func (t *Torrent) disallowDataDownloadLocked() {
2079 t.dataDownloadDisallowed = true
2080 t.iterPeers(func(c *Peer) {
2086 func (t *Torrent) AllowDataDownload() {
2089 t.dataDownloadDisallowed = false
2091 t.iterPeers(func(c *Peer) {
2096 // Enables uploading data, if it was disabled.
2097 func (t *Torrent) AllowDataUpload() {
2100 t.dataUploadDisallowed = false
2101 for c := range t.conns {
2106 // Disables uploading data, if it was enabled.
2107 func (t *Torrent) DisallowDataUpload() {
2110 t.dataUploadDisallowed = true
2111 for c := range t.conns {
2116 // Sets a handler that is called if there's an error writing a chunk to local storage. By default,
2117 // or if nil, a critical message is logged, and data download is disabled.
2118 func (t *Torrent) SetOnWriteChunkError(f func(error)) {
2121 t.userOnWriteChunkErr = f
2124 func (t *Torrent) iterPeers(f func(*Peer)) {
2125 for pc := range t.conns {
2128 for _, ws := range t.webSeeds {
2133 func (t *Torrent) callbacks() *Callbacks {
2134 return &t.cl.config.Callbacks
2137 var WebseedHttpClient = &http.Client{
2138 Transport: &http.Transport{
2139 MaxConnsPerHost: 10,
2143 func (t *Torrent) addWebSeed(url string) {
2144 if t.cl.config.DisableWebseeds {
2147 if _, ok := t.webSeeds[url]; ok {
2150 const maxRequests = 10
2156 reconciledHandshakeStats: true,
2157 peerSentHaveAll: true,
2158 // TODO: Raise this limit, and instead limit concurrent fetches.
2159 PeerMaxRequests: 32,
2160 RemoteAddr: remoteAddrFromUrl(url),
2161 callbacks: t.callbacks(),
2163 client: webseed.Client{
2164 // Consider a MaxConnsPerHost in the transport for this, possibly in a global Client.
2165 HttpClient: WebseedHttpClient,
2168 activeRequests: make(map[Request]webseed.Request, maxRequests),
2170 ws.requesterCond.L = t.cl.locker()
2171 for range iter.N(maxRequests) {
2174 for _, f := range t.callbacks().NewPeer {
2177 ws.peer.logger = t.logger.WithContextValue(&ws)
2178 ws.peer.peerImpl = &ws
2180 ws.onGotInfo(t.info)
2182 t.webSeeds[url] = &ws.peer
2185 func (t *Torrent) peerIsActive(p *Peer) (active bool) {
2186 t.iterPeers(func(p1 *Peer) {