18 "github.com/RoaringBitmap/roaring"
19 "github.com/anacrolix/chansync"
20 "github.com/anacrolix/chansync/events"
21 "github.com/anacrolix/dht/v2"
22 "github.com/anacrolix/log"
23 "github.com/anacrolix/missinggo/perf"
24 "github.com/anacrolix/missinggo/pubsub"
25 "github.com/anacrolix/missinggo/slices"
26 "github.com/anacrolix/missinggo/v2"
27 "github.com/anacrolix/missinggo/v2/bitmap"
28 "github.com/anacrolix/multiless"
29 "github.com/anacrolix/sync"
30 request_strategy "github.com/anacrolix/torrent/request-strategy"
31 "github.com/davecgh/go-spew/spew"
32 "github.com/pion/datachannel"
34 "github.com/anacrolix/torrent/bencode"
35 "github.com/anacrolix/torrent/common"
36 "github.com/anacrolix/torrent/metainfo"
37 pp "github.com/anacrolix/torrent/peer_protocol"
38 "github.com/anacrolix/torrent/segments"
39 "github.com/anacrolix/torrent/storage"
40 "github.com/anacrolix/torrent/tracker"
41 "github.com/anacrolix/torrent/webseed"
42 "github.com/anacrolix/torrent/webtorrent"
// Maintains state of a torrent within a Client. Many methods should not be called before the info
// is available, see .Info and .GotInfo.
type Torrent struct {
	// Torrent-level aggregate statistics. First in struct to ensure 64-bit
	// alignment. See #262.

	networkingEnabled      chansync.Flag
	dataDownloadDisallowed chansync.Flag
	dataUploadDisallowed   bool
	userOnWriteChunkErr    func(error)

	closed   chansync.SetOnce
	infoHash metainfo.Hash

	// Values are the piece indices that changed.
	pieceStateChanges *pubsub.PubSub
	// The size of chunks to request from peers over the wire. This is
	// normally 16KiB by convention these days.

	// Total length of the torrent in bytes. Stored because it's not O(1) to
	// get this from the info dict.

	// The storage to open when the info dict becomes available.
	storageOpener *storage.Client
	// Storage for torrent data.
	storage *storage.Torrent
	// Read-locked for using storage, and write-locked for Closing.
	storageLock sync.RWMutex

	// TODO: Only announce stuff is used?
	metainfo metainfo.MetaInfo

	// The info dict. nil if we don't have it (yet).

	fileIndex segments.Index

	webSeeds map[string]*Peer

	// Active peer connections, running message stream loops. TODO: Make this
	// open (not-closed) connections only.
	conns               map[*PeerConn]struct{}
	maxEstablishedConns int
	// Set of addrs to which we're attempting to connect. Connections are
	// half-open until all handshakes are completed.
	halfOpen map[string]PeerInfo

	// Reserve of peers to connect to. A peer can be both here and in the
	// active connections if we're told about the peer after connecting with
	// them. That encourages us to reconnect to peers that are well known in
	// the swarm.
	peers prioritizedPeers
	// Whether we want to know more peers.
	wantPeersEvent missinggo.Event
	// An announcer for each tracker URL.
	trackerAnnouncers map[string]torrentTrackerAnnouncer
	// How many times we've initiated a DHT announce. TODO: Move into stats.

	// Name used if the info name isn't available. Should be cleared when the
	// Info does become available.

	// The bencoded bytes of the info dict. This is actively manipulated if
	// the info bytes aren't initially available, and we try to fetch them
	// from peers.

	// Each element corresponds to the 16KiB metadata pieces. If true, we have
	// received that piece.
	metadataCompletedChunks []bool
	metadataChanged         sync.Cond

	// Closed when .Info is obtained.
	gotMetainfoC chan struct{}

	readers                map[*reader]struct{}
	_readerNowPieces       bitmap.Bitmap
	_readerReadaheadPieces bitmap.Bitmap

	// A cache of pieces we need to get. Calculated from various piece and
	// file priorities and completion states elsewhere.
	_pendingPieces roaring.Bitmap
	// A cache of completed piece indices.
	_completedPieces roaring.Bitmap
	// Pieces that need to be hashed.
	piecesQueuedForHash       bitmap.Bitmap
	activePieceHashes         int
	initialPieceCheckDisabled bool

	// Count of each request across active connections.
	pendingRequests map[RequestIndex]*Peer
	lastRequested   map[RequestIndex]time.Time
	// Chunks we've written to since the corresponding piece was last checked.
	dirtyChunks roaring.Bitmap

	// Is On when all pieces are complete.
	Complete chansync.Flag
}
func (t *Torrent) pieceAvailabilityFromPeers(i pieceIndex) (count int) {
	t.iterPeers(func(peer *Peer) {
		if peer.peerHasPiece(i) {
			count++
		}
	})
	return
}

func (t *Torrent) decPieceAvailability(i pieceIndex) {
	if !t.haveInfo() {
		return
	}
	p := t.piece(i)
	if p.availability <= 0 {
		panic(p.availability)
	}
	p.availability--
	t.updatePieceRequestOrder(i)
}

func (t *Torrent) incPieceAvailability(i pieceIndex) {
	// If we don't have the info, this should be reconciled when we do.
	if t.haveInfo() {
		p := t.piece(i)
		p.availability++
		t.updatePieceRequestOrder(i)
	}
}

func (t *Torrent) readerNowPieces() bitmap.Bitmap {
	return t._readerNowPieces
}

func (t *Torrent) readerReadaheadPieces() bitmap.Bitmap {
	return t._readerReadaheadPieces
}

func (t *Torrent) ignorePieceForRequests(i pieceIndex) bool {
	return !t.wantPieceIndex(i)
}

// Returns a channel that is closed when the Torrent is closed.
func (t *Torrent) Closed() events.Done {
	return t.closed.Done()
}

// KnownSwarm returns the known subset of the peers in the Torrent's swarm, including active,
// pending, and half-open peers.
func (t *Torrent) KnownSwarm() (ks []PeerInfo) {
	// Add pending peers to the list
	t.peers.Each(func(peer PeerInfo) {
		ks = append(ks, peer)
	})

	// Add half-open peers to the list
	for _, peer := range t.halfOpen {
		ks = append(ks, peer)
	}

	// Add active peers to the list
	for conn := range t.conns {
		ks = append(ks, PeerInfo{
			Id:     conn.PeerID,
			Addr:   conn.RemoteAddr,
			Source: conn.Discovery,
			// > If the connection is encrypted, that's certainly enough to set SupportsEncryption.
			// > But if we're not connected to them with an encrypted connection, I couldn't say
			// > what's appropriate. We can carry forward the SupportsEncryption value as we
			// > received it from trackers/DHT/PEX, or just use the encryption state for the
			// > connection. It's probably easiest to do the latter for now.
			// https://github.com/anacrolix/torrent/pull/188
			SupportsEncryption: conn.headerEncrypted,
		})
	}

	return
}

func (t *Torrent) setChunkSize(size pp.Integer) {
	t.chunkSize = size
	t.chunkPool = sync.Pool{
		New: func() interface{} {
			b := make([]byte, size)
			return &b
		},
	}
}

func (t *Torrent) pieceComplete(piece pieceIndex) bool {
	return t._completedPieces.Contains(bitmap.BitIndex(piece))
}

func (t *Torrent) pieceCompleteUncached(piece pieceIndex) storage.Completion {
	return t.pieces[piece].Storage().Completion()
}

// There's a connection to that address already.
func (t *Torrent) addrActive(addr string) bool {
	if _, ok := t.halfOpen[addr]; ok {
		return true
	}
	for c := range t.conns {
		ra := c.RemoteAddr
		if ra.String() == addr {
			return true
		}
	}
	return false
}

func (t *Torrent) appendUnclosedConns(ret []*PeerConn) []*PeerConn {
	return t.appendConns(ret, func(conn *PeerConn) bool {
		return !conn.closed.IsSet()
	})
}

func (t *Torrent) appendConns(ret []*PeerConn, f func(*PeerConn) bool) []*PeerConn {
	for c := range t.conns {
		if f(c) {
			ret = append(ret, c)
		}
	}
	return ret
}

func (t *Torrent) addPeer(p PeerInfo) (added bool) {
	cl := t.cl
	torrent.Add(fmt.Sprintf("peers added by source %q", p.Source), 1)
	if t.closed.IsSet() {
		return false
	}
	if ipAddr, ok := tryIpPortFromNetAddr(p.Addr); ok {
		if cl.badPeerIPPort(ipAddr.IP, ipAddr.Port) {
			torrent.Add("peers not added because of bad addr", 1)
			// cl.logger.Printf("peers not added because of bad addr: %v", p)
			return false
		}
	}
	if replaced, ok := t.peers.AddReturningReplacedPeer(p); ok {
		torrent.Add("peers replaced", 1)
		if !replaced.equal(p) {
			t.logger.WithDefaultLevel(log.Debug).Printf("added %v replacing %v", p, replaced)
		}
	}

	for t.peers.Len() > cl.config.TorrentPeersHighWater {
		_, ok := t.peers.DeleteMin()
		if ok {
			torrent.Add("excess reserve peers discarded", 1)
		}
	}
	return
}

func (t *Torrent) invalidateMetadata() {
	for i := 0; i < len(t.metadataCompletedChunks); i++ {
		t.metadataCompletedChunks[i] = false
	}

	t.gotMetainfoC = make(chan struct{})

}

func (t *Torrent) saveMetadataPiece(index int, data []byte) {

	if index >= len(t.metadataCompletedChunks) {
		t.logger.Printf("%s: ignoring metadata piece %d", t, index)
		return
	}
	copy(t.metadataBytes[(1<<14)*index:], data)
	t.metadataCompletedChunks[index] = true
}

func (t *Torrent) metadataPieceCount() int {
	return (len(t.metadataBytes) + (1 << 14) - 1) / (1 << 14)
}
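// A worked example of the arithmetic above (illustrative, not from the original
// source): metadata travels in 16 KiB (1<<14) pieces, so a 40000-byte info dict
// needs
//
//	(40000 + (1<<14) - 1) / (1 << 14) == 3
//
// pieces: two full 16 KiB pieces and a 7232-byte tail.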
func (t *Torrent) haveMetadataPiece(piece int) bool {
	if t.haveInfo() {
		return (1<<14)*piece < len(t.metadataBytes)
	}
	return piece < len(t.metadataCompletedChunks) && t.metadataCompletedChunks[piece]
}

func (t *Torrent) metadataSize() int {
	return len(t.metadataBytes)
}

func infoPieceHashes(info *metainfo.Info) (ret [][]byte) {
	for i := 0; i < len(info.Pieces); i += sha1.Size {
		ret = append(ret, info.Pieces[i:i+sha1.Size])
	}
	return
}

func (t *Torrent) makePieces() {
	hashes := infoPieceHashes(t.info)
	t.pieces = make([]Piece, len(hashes))
	for i, hash := range hashes {
		piece := &t.pieces[i]

		piece.index = pieceIndex(i)
		piece.noPendingWrites.L = &piece.pendingWritesMutex
		piece.hash = (*metainfo.Hash)(unsafe.Pointer(&hash[0]))

		beginFile := pieceFirstFileIndex(piece.torrentBeginOffset(), files)
		endFile := pieceEndFileIndex(piece.torrentEndOffset(), files)
		piece.files = files[beginFile:endFile]
		piece.undirtiedChunksIter = undirtiedChunksIter{
			TorrentDirtyChunks: &t.dirtyChunks,
			StartRequestIndex:  piece.requestIndexOffset(),
			EndRequestIndex:    piece.requestIndexOffset() + piece.numChunks(),
		}
	}
}

// Returns the index of the first file containing the piece. files must be
// ordered by offset.
func pieceFirstFileIndex(pieceOffset int64, files []*File) int {
	for i, f := range files {
		if f.offset+f.length > pieceOffset {
			return i
		}
	}

}

// Returns the index after the last file containing the piece. files must be
// ordered by offset.
func pieceEndFileIndex(pieceEndOffset int64, files []*File) int {
	for i, f := range files {
		if f.offset+f.length >= pieceEndOffset {
			return i + 1
		}
	}

}
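// Sketch of how the two helpers above combine in makePieces (hypothetical
// values): with files of length 10 and 20 bytes, a piece covering torrent
// offsets [8, 12) spans both files, since
//
//	pieceFirstFileIndex(8, files) == 0 // file 0 ends at offset 10, and 10 > 8
//	pieceEndFileIndex(12, files)  == 2 // file 1 ends at offset 30, and 30 >= 12
//
// so piece.files = files[0:2].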
func (t *Torrent) cacheLength() {
	var l int64
	for _, f := range t.info.UpvertedFiles() {
		l += f.Length
	}
	t.length = &l
}

// TODO: This shouldn't fail for storage reasons. Instead we should handle storage failure
// separately.
func (t *Torrent) setInfo(info *metainfo.Info) error {
	if err := validateInfo(info); err != nil {
		return fmt.Errorf("bad info: %s", err)
	}
	if t.storageOpener != nil {
		var err error
		t.storage, err = t.storageOpener.OpenTorrent(info, t.infoHash)
		if err != nil {
			return fmt.Errorf("error opening torrent storage: %s", err)
		}
	}

	t.fileIndex = segments.NewIndex(common.LengthIterFromUpvertedFiles(info.UpvertedFiles()))
	t.displayName = "" // Save a few bytes lol.

	return nil
}

func (t *Torrent) pieceRequestOrderKey(i int) request_strategy.PieceRequestOrderKey {
	return request_strategy.PieceRequestOrderKey{
		InfoHash: t.infoHash,
		Index:    i,
	}
}

// This seems to be all the follow-up tasks after the info is set, that can't fail.
func (t *Torrent) onSetInfo() {
	if t.cl.pieceRequestOrder == nil {
		t.cl.pieceRequestOrder = make(map[storage.TorrentCapacity]*request_strategy.PieceRequestOrder)
	}
	if t.cl.pieceRequestOrder[t.storage.Capacity] == nil {
		t.cl.pieceRequestOrder[t.storage.Capacity] = request_strategy.NewPieceOrder()
	}
	for i := range t.pieces {
		p := t.piece(i)
		// Need to add availability before updating piece completion, as that may result in conns
		// being dropped.
		if p.availability != 0 {
			panic(p.availability)
		}
		p.availability = int64(t.pieceAvailabilityFromPeers(i))
		t.cl.pieceRequestOrder[t.storage.Capacity].Add(
			t.pieceRequestOrderKey(i),
			t.requestStrategyPieceOrderState(i))
		t.updatePieceCompletion(pieceIndex(i))
		if !t.initialPieceCheckDisabled && !p.storageCompletionOk {
			// t.logger.Printf("piece %s completion unknown, queueing check", p)
			t.queuePieceCheck(pieceIndex(i))
		}
	}
	t.cl.event.Broadcast()
	close(t.gotMetainfoC)
	t.updateWantPeersEvent()
	t.pendingRequests = make(map[RequestIndex]*Peer)
	t.lastRequested = make(map[RequestIndex]time.Time)
	t.tryCreateMorePieceHashers()
	t.iterPeers(func(p *Peer) {

		p.updateRequests("onSetInfo")
	})
}

// Called when metadata for a torrent becomes available.
func (t *Torrent) setInfoBytesLocked(b []byte) error {
	if metainfo.HashBytes(b) != t.infoHash {
		return errors.New("info bytes have wrong hash")
	}
	var info metainfo.Info
	if err := bencode.Unmarshal(b, &info); err != nil {
		return fmt.Errorf("error unmarshalling info bytes: %s", err)
	}

	t.metadataCompletedChunks = nil

	if err := t.setInfo(&info); err != nil {
		return err
	}

	return nil
}

func (t *Torrent) haveAllMetadataPieces() bool {

	if t.metadataCompletedChunks == nil {
		return false
	}
	for _, have := range t.metadataCompletedChunks {
		if !have {
			return false
		}
	}
	return true
}

// TODO: Propagate errors to disconnect peer.
func (t *Torrent) setMetadataSize(size int) (err error) {
	if t.haveInfo() {
		// We already know the correct metadata size.
		return
	}
	if uint32(size) > maxMetadataSize {
		return errors.New("bad size")
	}
	if len(t.metadataBytes) == size {
		return
	}
	t.metadataBytes = make([]byte, size)
	t.metadataCompletedChunks = make([]bool, (size+(1<<14)-1)/(1<<14))
	t.metadataChanged.Broadcast()
	for c := range t.conns {
		c.requestPendingMetadata()
	}
	return
}

// The current working name for the torrent. Either the name in the info dict, or a display name
// given, such as by the dn value in a magnet link, or "".
func (t *Torrent) name() string {
	t.nameMu.RLock()
	defer t.nameMu.RUnlock()

	if t.displayName != "" {
		return t.displayName
	}
	return "infohash:" + t.infoHash.HexString()
}

func (t *Torrent) pieceState(index pieceIndex) (ret PieceState) {
	p := &t.pieces[index]
	ret.Priority = t.piecePriority(index)
	ret.Completion = p.completion()
	ret.QueuedForHash = p.queuedForHash()
	ret.Hashing = p.hashing
	ret.Checking = ret.QueuedForHash || ret.Hashing
	ret.Marking = p.marking
	if !ret.Complete && t.piecePartiallyDownloaded(index) {
		ret.Partial = true
	}
	return
}

func (t *Torrent) metadataPieceSize(piece int) int {
	return metadataPieceSize(len(t.metadataBytes), piece)
}

func (t *Torrent) newMetadataExtensionMessage(c *PeerConn, msgType pp.ExtendedMetadataRequestMsgType, piece int, data []byte) pp.Message {
	return pp.Message{
		Type:       pp.Extended,
		ExtendedID: c.PeerExtensionIDs[pp.ExtensionNameMetadata],
		ExtendedPayload: append(bencode.MustMarshal(pp.ExtendedMetadataRequestMsg{
			Piece:     piece,
			TotalSize: len(t.metadataBytes),
			Type:      msgType,
		}), data...),
	}
}

type pieceAvailabilityRun struct {
	count        int
	availability int64
}

func (me pieceAvailabilityRun) String() string {
	return fmt.Sprintf("%v(%v)", me.count, me.availability)
}

func (t *Torrent) pieceAvailabilityRuns() (ret []pieceAvailabilityRun) {
	rle := missinggo.NewRunLengthEncoder(func(el interface{}, count uint64) {
		ret = append(ret, pieceAvailabilityRun{availability: el.(int64), count: int(count)})
	})
	for i := range t.pieces {
		rle.Append(t.pieces[i].availability, 1)
	}
	rle.Flush()
	return
}
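// For example (hypothetical availabilities): pieces with availabilities
// [2, 2, 2, 5] run-length encode to two runs, and pieceAvailabilityRun.String
// prints them as "3(2) 1(5)", i.e. count(availability).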
func (t *Torrent) pieceStateRuns() (ret PieceStateRuns) {
	rle := missinggo.NewRunLengthEncoder(func(el interface{}, count uint64) {
		ret = append(ret, PieceStateRun{
			PieceState: el.(PieceState),
			Length:     int(count),
		})
	})
	for index := range t.pieces {
		rle.Append(t.pieceState(pieceIndex(index)), 1)
	}
	rle.Flush()
	return
}

// Produces a small string representing a PieceStateRun.
func (psr PieceStateRun) String() (ret string) {
	ret = fmt.Sprintf("%d", psr.Length)
	ret += func() string {
		switch psr.Priority {
		case PiecePriorityNext:

		case PiecePriorityNormal:

		case PiecePriorityReadahead:

		case PiecePriorityNow:

		case PiecePriorityHigh:

		}
	}()

	if psr.QueuedForHash {

	}

	return
}

func (t *Torrent) writeStatus(w io.Writer) {
	fmt.Fprintf(w, "Infohash: %s\n", t.infoHash.HexString())
	fmt.Fprintf(w, "Metadata length: %d\n", t.metadataSize())

	fmt.Fprintf(w, "Metadata have: ")
	for _, h := range t.metadataCompletedChunks {
		fmt.Fprintf(w, "%c", func() rune {

		}())
	}

	fmt.Fprintf(w, "Piece length: %s\n",
		func() string {
			if t.haveInfo() {
				return fmt.Sprintf("%v (%v chunks)",
					t.usualPieceSize(),
					float64(t.usualPieceSize())/float64(t.chunkSize))
			}

		}())

	fmt.Fprintf(w, "Num Pieces: %d (%d completed)\n", t.numPieces(), t.numPiecesCompleted())
	fmt.Fprintf(w, "Piece States: %s\n", t.pieceStateRuns())
	fmt.Fprintf(w, "Piece availability: %v\n", strings.Join(func() (ret []string) {
		for _, run := range t.pieceAvailabilityRuns() {
			ret = append(ret, run.String())
		}
		return
	}(), " "))

	fmt.Fprintf(w, "Reader Pieces:")
	t.forReaderOffsetPieces(func(begin, end pieceIndex) (again bool) {
		fmt.Fprintf(w, " %d:%d", begin, end)
		return true
	})

	fmt.Fprintf(w, "Enabled trackers:\n")

	tw := tabwriter.NewWriter(w, 0, 0, 2, ' ', 0)
	fmt.Fprintf(tw, " URL\tExtra\n")
	for _, ta := range slices.Sort(slices.FromMapElems(t.trackerAnnouncers), func(l, r torrentTrackerAnnouncer) bool {
		lu := l.URL()
		ru := r.URL()
		var luns, runs url.URL = *lu, *ru

		var ml missinggo.MultiLess
		ml.StrictNext(luns.String() == runs.String(), luns.String() < runs.String())
		ml.StrictNext(lu.String() == ru.String(), lu.String() < ru.String())
		return ml.Less()
	}).([]torrentTrackerAnnouncer) {
		fmt.Fprintf(tw, " %q\t%v\n", ta.URL(), ta.statusLine())
	}
	tw.Flush()

	fmt.Fprintf(w, "DHT Announces: %d\n", t.numDHTAnnounces)

	spew.NewDefaultConfig()
	spew.Fdump(w, t.statsLocked())

	peers := t.peersAsSlice()
	sort.Slice(peers, func(_i, _j int) bool {
		i := peers[_i]
		j := peers[_j]
		if less, ok := multiless.New().EagerSameLess(
			i.downloadRate() == j.downloadRate(), i.downloadRate() < j.downloadRate(),
		).LessOk(); ok {
			return less
		}
		return worseConn(i, j)
	})
	for i, c := range peers {
		fmt.Fprintf(w, "%2d. ", i+1)

	}
}

func (t *Torrent) haveInfo() bool {
	return t.info != nil
}

// Returns a run-time generated MetaInfo that includes the info bytes and
// announce-list as currently known to the client.
func (t *Torrent) newMetaInfo() metainfo.MetaInfo {
	return metainfo.MetaInfo{
		CreationDate: time.Now().Unix(),
		Comment:      "dynamic metainfo from client",
		CreatedBy:    "go.torrent",
		AnnounceList: t.metainfo.UpvertedAnnounceList().Clone(),
		InfoBytes: func() []byte {
			if t.haveInfo() {
				return t.metadataBytes
			}
			return nil
		}(),
		UrlList: func() []string {
			ret := make([]string, 0, len(t.webSeeds))
			for url := range t.webSeeds {
				ret = append(ret, url)
			}
			return ret
		}(),
	}
}

func (t *Torrent) BytesMissing() (n int64) {
	t.cl.rLock()
	n = t.bytesMissingLocked()
	t.cl.rUnlock()
	return
}

func (t *Torrent) bytesMissingLocked() int64 {
	return t.bytesLeft()
}

func iterFlipped(b *roaring.Bitmap, end uint64, cb func(uint32) bool) {
	roaring.Flip(b, 0, end).Iterate(cb)
}

func (t *Torrent) bytesLeft() (left int64) {
	iterFlipped(&t._completedPieces, uint64(t.numPieces()), func(x uint32) bool {
		p := t.piece(pieceIndex(x))
		left += int64(p.length() - p.numDirtyBytes())
		return true
	})
	return
}

// Bytes left to give in tracker announces.
func (t *Torrent) bytesLeftAnnounce() int64 {
	if t.haveInfo() {
		return t.bytesLeft()
	}
	return -1
}

func (t *Torrent) piecePartiallyDownloaded(piece pieceIndex) bool {
	if t.pieceComplete(piece) {
		return false
	}
	if t.pieceAllDirty(piece) {
		return false
	}
	return t.pieces[piece].hasDirtyChunks()
}

func (t *Torrent) usualPieceSize() int {
	return int(t.info.PieceLength)
}

func (t *Torrent) numPieces() pieceIndex {
	return pieceIndex(t.info.NumPieces())
}

func (t *Torrent) numPiecesCompleted() (num pieceIndex) {
	return pieceIndex(t._completedPieces.GetCardinality())
}

func (t *Torrent) deletePieceRequestOrder() {
	for i := 0; i < t.numPieces(); i++ {
		t.cl.pieceRequestOrder[t.storage.Capacity].Delete(t.pieceRequestOrderKey(i))
	}
}

func (t *Torrent) close(wg *sync.WaitGroup) (err error) {
	t.closed.Set()
	if t.storage != nil {

		defer t.storageLock.Unlock()
		if f := t.storage.Close; f != nil {
			err1 := f()
			if err1 != nil {
				t.logger.WithDefaultLevel(log.Warning).Printf("error closing storage: %v", err1)
			}
		}
	}
	t.iterPeers(func(p *Peer) {
		p.close()
	})
	if t.storage != nil {
		t.deletePieceRequestOrder()
	}

	t.cl.event.Broadcast()
	t.pieceStateChanges.Close()
	t.updateWantPeersEvent()
	return
}

func (t *Torrent) requestOffset(r Request) int64 {
	return torrentRequestOffset(*t.length, int64(t.usualPieceSize()), r)
}

// Return the request that would include the given offset into the torrent data. Returns !ok if
// there is no such request.
func (t *Torrent) offsetRequest(off int64) (req Request, ok bool) {
	return torrentOffsetRequest(*t.length, t.info.PieceLength, int64(t.chunkSize), off)
}

func (t *Torrent) writeChunk(piece int, begin int64, data []byte) (err error) {
	defer perf.ScopeTimerErr(&err)()
	n, err := t.pieces[piece].Storage().WriteAt(data, begin)
	if err == nil && n != len(data) {
		err = io.ErrShortWrite
	}
	return
}

func (t *Torrent) bitfield() (bf []bool) {
	bf = make([]bool, t.numPieces())
	t._completedPieces.Iterate(func(piece uint32) (again bool) {
		bf[piece] = true
		return true
	})
	return
}

func (t *Torrent) pieceNumChunks(piece pieceIndex) chunkIndexType {
	return chunkIndexType((t.pieceLength(piece) + t.chunkSize - 1) / t.chunkSize)
}

func (t *Torrent) chunksPerRegularPiece() uint32 {
	return uint32((pp.Integer(t.usualPieceSize()) + t.chunkSize - 1) / t.chunkSize)
}

func (t *Torrent) numRequests() RequestIndex {
	if t.numPieces() == 0 {
		return 0
	}
	return uint32(t.numPieces()-1)*t.chunksPerRegularPiece() + t.pieceNumChunks(t.numPieces()-1)
}
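// Worked example (assumed sizes, not from the source): with 256 KiB pieces and
// 16 KiB chunks, chunksPerRegularPiece() == 16. A 10-piece torrent whose final
// piece only holds 3 chunks then has
//
//	numRequests() == 9*16 + 3 == 147
//
// request indices, one per (piece, chunk) pair.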
func (t *Torrent) pendAllChunkSpecs(pieceIndex pieceIndex) {
	t.dirtyChunks.RemoveRange(
		uint64(t.pieceRequestIndexOffset(pieceIndex)),
		uint64(t.pieceRequestIndexOffset(pieceIndex+1)))
}

func (t *Torrent) pieceLength(piece pieceIndex) pp.Integer {
	if t.info.PieceLength == 0 {
		// There will be no variance amongst pieces. Only pain.
		return 0
	}
	if piece == t.numPieces()-1 {
		ret := pp.Integer(*t.length % t.info.PieceLength)
		if ret != 0 {
			return ret
		}
	}
	return pp.Integer(t.info.PieceLength)
}
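// Example of the final-piece arithmetic (illustrative numbers): a 1000-byte
// torrent with 256-byte pieces has 4 pieces; pieceLength(3) computes
// 1000 % 256 == 232 and returns it, while the full piece length is returned
// only when the remainder is zero (torrent length a multiple of piece length).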
func (t *Torrent) hashPiece(piece pieceIndex) (ret metainfo.Hash, err error) {
	p := t.piece(piece)
	p.waitNoPendingWrites()
	storagePiece := t.pieces[piece].Storage()

	// Does the backend want to do its own hashing?
	if i, ok := storagePiece.PieceImpl.(storage.SelfHashing); ok {
		var sum metainfo.Hash
		// log.Printf("A piece decided to self-hash: %d", piece)
		sum, err = i.SelfHash()
		missinggo.CopyExact(&ret, sum)
		return
	}

	hash := pieceHash.New()
	const logPieceContents = false
	if logPieceContents {
		var examineBuf bytes.Buffer
		_, err = storagePiece.WriteTo(io.MultiWriter(hash, &examineBuf))
		log.Printf("hashed %q with copy err %v", examineBuf.Bytes(), err)
	} else {
		_, err = storagePiece.WriteTo(hash)
	}
	missinggo.CopyExact(&ret, hash.Sum(nil))
	return
}

func (t *Torrent) haveAnyPieces() bool {
	return !t._completedPieces.IsEmpty()
}

func (t *Torrent) haveAllPieces() bool {
	if !t.haveInfo() {
		return false
	}
	return t._completedPieces.GetCardinality() == bitmap.BitRange(t.numPieces())
}

func (t *Torrent) havePiece(index pieceIndex) bool {
	return t.haveInfo() && t.pieceComplete(index)
}
func (t *Torrent) maybeDropMutuallyCompletePeer(
	// I'm not sure about taking peer here, not all peer implementations actually drop. Maybe that's
	// okay?
	p *Peer,
) {
	if !t.cl.config.DropMutuallyCompletePeers {
		return
	}
	if !t.haveAllPieces() {
		return
	}
	if all, known := p.peerHasAllPieces(); !(known && all) {
		return
	}

	t.logger.WithDefaultLevel(log.Debug).Printf("dropping %v, which is mutually complete", p)
	p.drop()
}

func (t *Torrent) haveChunk(r Request) (ret bool) {

	// log.Println("have chunk", r, ret)

	if !t.haveInfo() {
		return false
	}
	if t.pieceComplete(pieceIndex(r.Index)) {
		return true
	}
	p := &t.pieces[r.Index]
	return !p.pendingChunk(r.ChunkSpec, t.chunkSize)
}

func chunkIndexFromChunkSpec(cs ChunkSpec, chunkSize pp.Integer) chunkIndexType {
	return chunkIndexType(cs.Begin / chunkSize)
}

func (t *Torrent) wantPieceIndex(index pieceIndex) bool {
	return t._pendingPieces.Contains(uint32(index))
}

// A pool of []*PeerConn, to reduce allocations in functions that need to index or sort Torrent
// conns (which is a map).
var peerConnSlices sync.Pool

func getPeerConnSlice(cap int) []*PeerConn {
	getInterface := peerConnSlices.Get()
	if getInterface == nil {
		return make([]*PeerConn, 0, cap)
	}
	return getInterface.([]*PeerConn)[:0]
}
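// Typical use of the pool (a sketch of the pattern, matching worstBadConn
// below): take a slice sized for the current conns, and return it when done.
//
//	conns := getPeerConnSlice(len(t.conns))
//	defer peerConnSlices.Put(conns)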
// The worst connection is one that hasn't been sent, or sent anything useful for the longest. A bad
// connection is one that usually sends us unwanted pieces, or has been in the worse half of the
// established connections for more than a minute. This is O(n log n). If there was a way to not
// consider the position of a conn relative to the total number, it could be reduced to O(n).
func (t *Torrent) worstBadConn() (ret *PeerConn) {
	wcs := worseConnSlice{conns: t.appendUnclosedConns(getPeerConnSlice(len(t.conns)))}
	defer peerConnSlices.Put(wcs.conns)

	for wcs.Len() != 0 {
		c := heap.Pop(&wcs).(*PeerConn)
		if c._stats.ChunksReadWasted.Int64() >= 6 && c._stats.ChunksReadWasted.Int64() > c._stats.ChunksReadUseful.Int64() {
			return c
		}
		// If the connection is in the worst half of the established
		// connection quota and is older than a minute.
		if wcs.Len() >= (t.maxEstablishedConns+1)/2 {
			// Give connections 1 minute to prove themselves.
			if time.Since(c.completedHandshake) > time.Minute {
				return c
			}
		}
	}
	return nil
}

type PieceStateChange struct {
	Index int
	PieceState
}

func (t *Torrent) publishPieceChange(piece pieceIndex) {
	t.cl._mu.Defer(func() {
		cur := t.pieceState(piece)
		p := &t.pieces[piece]
		if cur != p.publicPieceState {
			p.publicPieceState = cur
			t.pieceStateChanges.Publish(PieceStateChange{
				int(piece),
				cur,
			})
		}
	})
}

func (t *Torrent) pieceNumPendingChunks(piece pieceIndex) pp.Integer {
	if t.pieceComplete(piece) {
		return 0
	}
	return pp.Integer(t.pieceNumChunks(piece) - t.pieces[piece].numDirtyChunks())
}

func (t *Torrent) pieceAllDirty(piece pieceIndex) bool {
	return t.pieces[piece].allChunksDirty()
}

func (t *Torrent) readersChanged() {
	t.updateReaderPieces()
	t.updateAllPiecePriorities("Torrent.readersChanged")
}

func (t *Torrent) updateReaderPieces() {
	t._readerNowPieces, t._readerReadaheadPieces = t.readerPiecePriorities()
}

func (t *Torrent) readerPosChanged(from, to pieceRange) {

	t.updateReaderPieces()
	// Order the ranges, high and low.
	l, h := from, to
	if l.begin > h.begin {
		l, h = h, l
	}
	if l.end < h.begin {
		// Two distinct ranges.
		t.updatePiecePriorities(l.begin, l.end, "Torrent.readerPosChanged")
		t.updatePiecePriorities(h.begin, h.end, "Torrent.readerPosChanged")
	} else {
		// Ranges overlap.
		end := h.end
		if l.end > end {
			end = l.end
		}
		t.updatePiecePriorities(l.begin, end, "Torrent.readerPosChanged")
	}
}

func (t *Torrent) maybeNewConns() {
	// Tickle the accept routine.
	t.cl.event.Broadcast()
	t.openNewConns()
}

func (t *Torrent) piecePriorityChanged(piece pieceIndex, reason string) {
	if t._pendingPieces.Contains(uint32(piece)) {
		t.iterPeers(func(c *Peer) {
			// if c.actualRequestState.Interested {
			// 	return
			// }
			if !c.isLowOnRequests() {
				return
			}
			if !c.peerHasPiece(piece) {
				return
			}
			if c.peerChoking && !c.peerAllowedFast.Contains(uint32(piece)) {
				return
			}
			c.updateRequests(reason)
		})
	}
	t.maybeNewConns()
	t.publishPieceChange(piece)
}
func (t *Torrent) updatePiecePriority(piece pieceIndex, reason string) {
	if !t.closed.IsSet() {
		// It would be possible to filter on pure-priority changes here to avoid churning the piece
		// request order.
		t.updatePieceRequestOrder(piece)
	}
	p := &t.pieces[piece]
	newPrio := p.uncachedPriority()
	// t.logger.Printf("torrent %p: piece %d: uncached priority: %v", t, piece, newPrio)
	if newPrio == PiecePriorityNone {
		if !t._pendingPieces.CheckedRemove(uint32(piece)) {
			return
		}
	} else {
		if !t._pendingPieces.CheckedAdd(uint32(piece)) {
			return
		}
	}
	t.piecePriorityChanged(piece, reason)
}

func (t *Torrent) updateAllPiecePriorities(reason string) {
	t.updatePiecePriorities(0, t.numPieces(), reason)
}

// Update all piece priorities in one hit. This function should have the same
// output as updatePiecePriority, but across all pieces.
func (t *Torrent) updatePiecePriorities(begin, end pieceIndex, reason string) {
	for i := begin; i < end; i++ {
		t.updatePiecePriority(i, reason)
	}
}

// Returns the range of pieces [begin, end) that contains the extent of bytes.
func (t *Torrent) byteRegionPieces(off, size int64) (begin, end pieceIndex) {
	if off >= *t.length {
		return
	}

	begin = pieceIndex(off / t.info.PieceLength)
	end = pieceIndex((off + size + t.info.PieceLength - 1) / t.info.PieceLength)
	if end > pieceIndex(t.info.NumPieces()) {
		end = pieceIndex(t.info.NumPieces())
	}
	return
}
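// Worked example (hypothetical sizes): with a piece length of 100 bytes, the
// extent off=250, size=120 covers bytes [250, 370), giving
//
//	begin == 250/100          == 2
//	end   == (250+120+99)/100 == 4
//
// i.e. pieces [2, 4), with end clamped to NumPieces() if the extent overruns.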
// Returns true if all iterations complete without breaking. Yields the read regions of all
// readers. The reader regions should not be merged as some callers depend on this method to
// enumerate readers.
func (t *Torrent) forReaderOffsetPieces(f func(begin, end pieceIndex) (more bool)) (all bool) {
	for r := range t.readers {
		p := r.pieces
		if p.begin >= p.end {
			continue
		}
		if !f(p.begin, p.end) {
			return false
		}
	}
	return true
}

func (t *Torrent) piecePriority(piece pieceIndex) piecePriority {
	return t.piece(piece).uncachedPriority()
}

func (t *Torrent) pendRequest(req RequestIndex) {
	t.piece(int(req / t.chunksPerRegularPiece())).pendChunkIndex(req % t.chunksPerRegularPiece())
}

func (t *Torrent) pieceCompletionChanged(piece pieceIndex, reason string) {
	t.cl.event.Broadcast()
	if t.pieceComplete(piece) {
		t.onPieceCompleted(piece)
	} else {
		t.onIncompletePiece(piece)
	}
	t.updatePiecePriority(piece, reason)
}

func (t *Torrent) numReceivedConns() (ret int) {
	for c := range t.conns {
		if c.Discovery == PeerSourceIncoming {
			ret++
		}
	}
	return
}

func (t *Torrent) maxHalfOpen() int {
	// Note that if we somehow exceed the maximum established conns, we want
	// the negative value to have an effect.
	establishedHeadroom := int64(t.maxEstablishedConns - len(t.conns))
	extraIncoming := int64(t.numReceivedConns() - t.maxEstablishedConns/2)
	// We want to allow some experimentation with new peers, and to try to
	// upset an oversupply of received connections.
	return int(min(max(5, extraIncoming)+establishedHeadroom, int64(t.cl.config.HalfOpenConnsPerTorrent)))
}
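// Example with assumed numbers: maxEstablishedConns 50, 40 current conns, 30
// of them incoming, and HalfOpenConnsPerTorrent 25 gives
//
//	establishedHeadroom == 50 - 40   == 10
//	extraIncoming       == 30 - 50/2 == 5
//	min(max(5, 5)+10, 25)            == 15
//
// so up to 15 half-open connections would be allowed.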
func (t *Torrent) openNewConns() (initiated int) {
	defer t.updateWantPeersEvent()
	for t.peers.Len() != 0 {
		if !t.wantConns() {
			return
		}
		if len(t.halfOpen) >= t.maxHalfOpen() {
			return
		}
		if len(t.cl.dialers) == 0 {
			return
		}
		if t.cl.numHalfOpen >= t.cl.config.TotalHalfOpenConns {
			return
		}
		p := t.peers.PopMax()
		t.initiateConn(p)
		initiated++
	}
	return
}

func (t *Torrent) updatePieceCompletion(piece pieceIndex) bool {
	p := t.piece(piece)
	uncached := t.pieceCompleteUncached(piece)
	cached := p.completion()
	changed := cached != uncached
	complete := uncached.Complete
	p.storageCompletionOk = uncached.Ok
	x := uint32(piece)
	if complete {
		t._completedPieces.Add(x)

	} else {
		t._completedPieces.Remove(x)
	}

	p.t.updatePieceRequestOrder(piece)

	if complete && len(p.dirtiers) != 0 {
		t.logger.Printf("marked piece %v complete but still has dirtiers", piece)
	}
	if changed {
		log.Fstr("piece %d completion changed: %+v -> %+v", piece, cached, uncached).SetLevel(log.Debug).Log(t.logger)
		t.pieceCompletionChanged(piece, "Torrent.updatePieceCompletion")
	}
	return changed
}

// Non-blocking read. Client lock is not required.
func (t *Torrent) readAt(b []byte, off int64) (n int, err error) {
	for len(b) != 0 {
		p := &t.pieces[off/t.info.PieceLength]
		p.waitNoPendingWrites()
		var n1 int
		n1, err = p.Storage().ReadAt(b, off-p.Info().Offset())
		if n1 == 0 {
			break
		}
		off += int64(n1)
		n += n1
		b = b[n1:]
	}
	return
}

// Returns an error if the metadata was completed, but couldn't be set for some reason. Blame it on
// the last peer to contribute. TODO: Actually we shouldn't blame peers for failure to open storage
// etc. Also we should probably cache metadata pieces per-Peer, to isolate failure appropriately.
func (t *Torrent) maybeCompleteMetadata() error {
	if t.haveInfo() {
		return nil
	}
	if !t.haveAllMetadataPieces() {
		// Don't have enough metadata pieces.
		return nil
	}
	err := t.setInfoBytesLocked(t.metadataBytes)
	if err != nil {
		t.invalidateMetadata()
		return fmt.Errorf("error setting info bytes: %s", err)
	}
	if t.cl.config.Debug {
		t.logger.Printf("%s: got metadata from peers", t)
	}
	return nil
}

func (t *Torrent) readerPiecePriorities() (now, readahead bitmap.Bitmap) {
	t.forReaderOffsetPieces(func(begin, end pieceIndex) bool {
		if end > begin {
			now.Add(bitmap.BitIndex(begin))
			readahead.AddRange(bitmap.BitRange(begin)+1, bitmap.BitRange(end))
		}
		return true
	})
	return
}
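// For instance (illustrative reader span): a reader whose current span covers
// pieces [3, 7) contributes piece 3 to "now" and pieces 4-6 to "readahead",
// matching the begin and (begin, end) bitmap updates above.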
func (t *Torrent) needData() bool {
	if t.closed.IsSet() {
		return false
	}
	if !t.haveInfo() {
		return true
	}
	return !t._pendingPieces.IsEmpty()
}

func appendMissingStrings(old, new []string) (ret []string) {
	ret = old
new:
	for _, n := range new {
		for _, o := range old {
			if o == n {
				continue new
			}
		}
		ret = append(ret, n)
	}
	return
}

func appendMissingTrackerTiers(existing [][]string, minNumTiers int) (ret [][]string) {
	ret = existing
	for minNumTiers > len(ret) {
		ret = append(ret, nil)
	}
	return
}

func (t *Torrent) addTrackers(announceList [][]string) {
	fullAnnounceList := &t.metainfo.AnnounceList
	t.metainfo.AnnounceList = appendMissingTrackerTiers(*fullAnnounceList, len(announceList))
	for tierIndex, trackerURLs := range announceList {
		(*fullAnnounceList)[tierIndex] = appendMissingStrings((*fullAnnounceList)[tierIndex], trackerURLs)
	}
	t.startMissingTrackerScrapers()
	t.updateWantPeersEvent()
}

// Don't call this before the info is available.
func (t *Torrent) bytesCompleted() int64 {
	if !t.haveInfo() {
		return 0
	}
	return *t.length - t.bytesLeft()
}

func (t *Torrent) SetInfoBytes(b []byte) (err error) {
	t.cl.lock()
	defer t.cl.unlock()
	return t.setInfoBytesLocked(b)
}

// Returns true if connection is removed from torrent.Conns.
func (t *Torrent) deletePeerConn(c *PeerConn) (ret bool) {
	if !c.closed.IsSet() {
		panic("connection is not closed")
		// There are behaviours prevented by the closed state that will fail
		// if the connection has been deleted.
	}
	_, ret = t.conns[c]
	delete(t.conns, c)
	// Avoid adding a drop event more than once. Probably we should track whether we've generated
	// the drop event against the PexConnState instead.

	if !t.cl.config.DisablePEX {
		t.pex.Drop(c)
	}

	torrent.Add("deleted connections", 1)
	c.deleteAllRequests()
	t.assertPendingRequests()
	return
}

func (t *Torrent) decPeerPieceAvailability(p *Peer) {
	if !t.haveInfo() {
		return
	}
	p.newPeerPieces().Iterate(func(i uint32) bool {
		p.t.decPieceAvailability(pieceIndex(i))
		return true
	})
}

func (t *Torrent) assertPendingRequests() {

	// var actual pendingRequests
	// if t.haveInfo() {
	// 	actual.m = make([]int, t.numRequests())
	// }
	// t.iterPeers(func(p *Peer) {
	// 	p.actualRequestState.Requests.Iterate(func(x uint32) bool {

	// diff := cmp.Diff(actual.m, t.pendingRequests.m)

}

func (t *Torrent) dropConnection(c *PeerConn) {
	t.cl.event.Broadcast()
	c.close()
	if t.deletePeerConn(c) {
		t.openNewConns()
	}
}

// Peers as in contact information for dialing out.
func (t *Torrent) wantPeers() bool {
	if t.closed.IsSet() {
		return false
	}
	if t.peers.Len() > t.cl.config.TorrentPeersLowWater {
		return false
	}
	return t.wantConns()
}

func (t *Torrent) updateWantPeersEvent() {
	if t.wantPeers() {
		t.wantPeersEvent.Set()
	} else {
		t.wantPeersEvent.Clear()
	}
}

// Returns whether the client should make effort to seed the torrent.
func (t *Torrent) seeding() bool {
	cl := t.cl
	if t.closed.IsSet() {
		return false
	}
	if t.dataUploadDisallowed {
		return false
	}
	if cl.config.NoUpload {
		return false
	}
	if !cl.config.Seed {
		return false
	}
	if cl.config.DisableAggressiveUpload && t.needData() {
		return false
	}
	return true
}

func (t *Torrent) onWebRtcConn(
	c datachannel.ReadWriteCloser,
	dcc webtorrent.DataChannelContext,
) {

	pc, err := t.cl.initiateProtocolHandshakes(
		context.Background(),
		webrtcNetConn{c, dcc},

		webrtcNetAddr{dcc.Remote},

		fmt.Sprintf("webrtc offer_id %x", dcc.OfferId),
	)
	if err != nil {
		t.logger.WithDefaultLevel(log.Error).Printf("error in handshaking webrtc connection: %v", err)
		return
	}
	if dcc.LocalOffered {
		pc.Discovery = PeerSourceTracker
	} else {
		pc.Discovery = PeerSourceIncoming
	}
	pc.conn.SetWriteDeadline(time.Time{})

	err = t.cl.runHandshookConn(pc, t)
	if err != nil {
		t.logger.WithDefaultLevel(log.Critical).Printf("error running handshook webrtc conn: %v", err)
	}
}

func (t *Torrent) logRunHandshookConn(pc *PeerConn, logAll bool, level log.Level) {
	err := t.cl.runHandshookConn(pc, t)
	if err != nil || logAll {
		t.logger.WithDefaultLevel(level).Printf("error running handshook conn: %v", err)
	}
}

func (t *Torrent) runHandshookConnLoggingErr(pc *PeerConn) {
	t.logRunHandshookConn(pc, false, log.Debug)
}

func (t *Torrent) startWebsocketAnnouncer(u url.URL) torrentTrackerAnnouncer {
	wtc, release := t.cl.websocketTrackers.Get(u.String())

	wst := websocketTrackerStatus{u, wtc}
	go func() {
		err := wtc.Announce(tracker.Started, t.infoHash)
		if err != nil {
			t.logger.WithDefaultLevel(log.Warning).Printf(
				"error in initial announce to %q: %v",
				u.String(), err,
			)
		}
	}()
	return wst
}

func (t *Torrent) startScrapingTracker(_url string) {
	if _url == "" {
		return
	}
	u, err := url.Parse(_url)
	if err != nil {
		// URLs with a leading '*' appear to be a uTorrent convention to
		// disable trackers.
		if _url[0] != '*' {
			log.Str("error parsing tracker url").AddValues("url", _url).Log(t.logger)
		}
		return
	}
	if u.Scheme == "udp" {
		u.Scheme = "udp4"
		t.startScrapingTracker(u.String())
		u.Scheme = "udp6"
		t.startScrapingTracker(u.String())
	}

	if _, ok := t.trackerAnnouncers[_url]; ok {
		return
	}
	sl := func() torrentTrackerAnnouncer {
		switch u.Scheme {
		case "ws", "wss":
			if t.cl.config.DisableWebtorrent {
				return nil
			}
			return t.startWebsocketAnnouncer(*u)
		}
		if t.cl.config.DisableIPv4Peers || t.cl.config.DisableIPv4 {
			return nil
		}
		if t.cl.config.DisableIPv6 {
			return nil
		}
		newAnnouncer := &trackerScraper{

			lookupTrackerIp: t.cl.config.LookupTrackerIp,
		}
		go newAnnouncer.Run()
		return newAnnouncer
	}()

	if t.trackerAnnouncers == nil {
		t.trackerAnnouncers = make(map[string]torrentTrackerAnnouncer)
	}
	t.trackerAnnouncers[_url] = sl
}
// Adds and starts tracker scrapers for tracker URLs that aren't already
// running.
func (t *Torrent) startMissingTrackerScrapers() {
	if t.cl.config.DisableTrackers {
		return
	}
	t.startScrapingTracker(t.metainfo.Announce)
	for _, tier := range t.metainfo.AnnounceList {
		for _, url := range tier {
			t.startScrapingTracker(url)
		}
	}
}

// Returns an AnnounceRequest with fields filled out to defaults and current
// values.
func (t *Torrent) announceRequest(event tracker.AnnounceEvent) tracker.AnnounceRequest {
	// Note that IPAddress is not set. It's set for UDP inside the tracker code, since it's
	// dependent on the network in use.
	return tracker.AnnounceRequest{
		Event: event,
		NumWant: func() int32 {
			if t.wantPeers() && len(t.cl.dialers) > 0 {
				return -1
			}
			return 0
		}(),
		Port:     uint16(t.cl.incomingPeerPort()),
		PeerId:   t.cl.peerID,
		InfoHash: t.infoHash,
		Key:      t.cl.announceKey(),

		// The following are vaguely described in BEP 3.

		Left:     t.bytesLeftAnnounce(),
		Uploaded: t.stats.BytesWrittenData.Int64(),
		// There's no mention of wasted or unwanted download in the BEP.
		Downloaded: t.stats.BytesReadUsefulData.Int64(),
	}
}

// Adds peers revealed in an announce until the announce ends, or we have
// enough peers.
func (t *Torrent) consumeDhtAnnouncePeers(pvs <-chan dht.PeersValues) {

	for v := range pvs {

		for _, cp := range v.Peers {
			if cp.Port == 0 {
				// Can't do anything with this.
				continue
			}
			if t.addPeer(PeerInfo{
				Addr:   ipPortAddr{cp.IP, cp.Port},
				Source: PeerSourceDhtGetPeers,
			}) {

			}
		}

		// log.Printf("added %v peers from dht for %v", added, t.InfoHash().HexString())

	}
}

// Announce using the provided DHT server. Peers are consumed automatically. done is closed when the
// announce ends. stop will force the announce to end.
func (t *Torrent) AnnounceToDht(s DhtServer) (done <-chan struct{}, stop func(), err error) {
	ps, err := s.Announce(t.infoHash, t.cl.incomingPeerPort(), true)
	if err != nil {
		return
	}
	_done := make(chan struct{})
	done = _done
	go func() {

		t.consumeDhtAnnouncePeers(ps.Peers())

	}()

	return
}

func (t *Torrent) timeboxedAnnounceToDht(s DhtServer) error {
	_, stop, err := t.AnnounceToDht(s)
	if err != nil {
		return err
	}
	select {
	case <-t.closed.Done():
	case <-time.After(5 * time.Minute):
	}
	stop()
	return nil
}

func (t *Torrent) dhtAnnouncer(s DhtServer) {
	cl := t.cl

	for {
		if t.closed.IsSet() {
			return
		}
		// We're also announcing ourselves as a listener, so we don't just want peer addresses.
		// TODO: We can include the announce_peer step depending on whether we can receive
		// inbound connections. We should probably only announce once every 15 mins too.

		// TODO: Determine if there's a listener on the port we're announcing.
		if len(cl.dialers) == 0 && len(cl.listeners) == 0 {

		}

		err := t.timeboxedAnnounceToDht(s)
		if err != nil {
			t.logger.WithDefaultLevel(log.Warning).Printf("error announcing %q to DHT: %s", t, err)
		}

	}
}

func (t *Torrent) addPeers(peers []PeerInfo) (added int) {
	for _, p := range peers {
		if t.addPeer(p) {
			added++
		}
	}
	return
}

// The returned TorrentStats may require alignment in memory. See
// https://github.com/anacrolix/torrent/issues/383.
func (t *Torrent) Stats() TorrentStats {
	t.cl.rLock()
	defer t.cl.rUnlock()
	return t.statsLocked()
}

func (t *Torrent) statsLocked() (ret TorrentStats) {
	ret.ActivePeers = len(t.conns)
	ret.HalfOpenPeers = len(t.halfOpen)
	ret.PendingPeers = t.peers.Len()
	ret.TotalPeers = t.numTotalPeers()
	ret.ConnectedSeeders = 0
	for c := range t.conns {
		if all, ok := c.peerHasAllPieces(); all && ok {
			ret.ConnectedSeeders++
		}
	}
	ret.ConnStats = t.stats.Copy()
	ret.PiecesComplete = t.numPiecesCompleted()
	return
}

// The total number of peers in the torrent.
func (t *Torrent) numTotalPeers() int {
	peers := make(map[string]struct{})
	for conn := range t.conns {
		ra := conn.conn.RemoteAddr()
		if ra == nil {
			// It's been closed and doesn't support RemoteAddr.
			continue
		}
		peers[ra.String()] = struct{}{}
	}
	for addr := range t.halfOpen {
		peers[addr] = struct{}{}
	}
	t.peers.Each(func(peer PeerInfo) {
		peers[peer.Addr.String()] = struct{}{}
	})
	return len(peers)
}

// Reconcile bytes transferred before connection was associated with a
// torrent.
func (t *Torrent) reconcileHandshakeStats(c *PeerConn) {
	if c._stats != (ConnStats{
		// Handshakes should only increment these fields:
		BytesWritten: c._stats.BytesWritten,
		BytesRead:    c._stats.BytesRead,
	}) {

	}
	c.postHandshakeStats(func(cs *ConnStats) {
		cs.BytesRead.Add(c._stats.BytesRead.Int64())
		cs.BytesWritten.Add(c._stats.BytesWritten.Int64())
	})
	c.reconciledHandshakeStats = true
}
// Returns true if the connection is added.
func (t *Torrent) addPeerConn(c *PeerConn) (err error) {

	torrent.Add("added connections", 1)

	if t.closed.IsSet() {
		return errors.New("torrent closed")
	}
	for c0 := range t.conns {
		if c.PeerID != c0.PeerID {
			continue
		}
		if !t.cl.config.DropDuplicatePeerIds {
			continue
		}
		if left, ok := c.hasPreferredNetworkOver(c0); ok && left {
			c0.close()
			t.deletePeerConn(c0)
		} else {
			return errors.New("existing connection preferred")
		}
	}
	if len(t.conns) >= t.maxEstablishedConns {
		c := t.worstBadConn()
		if c == nil {
			return errors.New("don't want conns")
		}
		c.close()
		t.deletePeerConn(c)
	}
	if len(t.conns) >= t.maxEstablishedConns {

	}
	t.conns[c] = struct{}{}
	if !t.cl.config.DisablePEX && !c.PeerExtensionBytes.SupportsExtended() {
		t.pex.Add(c) // as no further extended handshake expected
	}
	return nil
}

func (t *Torrent) wantConns() bool {
	if !t.networkingEnabled.Bool() {
		return false
	}
	if t.closed.IsSet() {
		return false
	}
	if !t.needData() && (!t.seeding() || !t.haveAnyPieces()) {
		return false
	}
	return len(t.conns) < t.maxEstablishedConns || t.worstBadConn() != nil
}

func (t *Torrent) SetMaxEstablishedConns(max int) (oldMax int) {
	t.cl.lock()
	defer t.cl.unlock()
	oldMax = t.maxEstablishedConns
	t.maxEstablishedConns = max
	wcs := worseConnSlice{
		conns: t.appendConns(nil, func(*PeerConn) bool {
			return true
		}),
	}

	for len(t.conns) > t.maxEstablishedConns && wcs.Len() > 0 {
		t.dropConnection(heap.Pop(&wcs).(*PeerConn))
	}
	t.openNewConns()
	return
}
func (t *Torrent) pieceHashed(piece pieceIndex, passed bool, hashIoErr error) {
	t.logger.Log(log.Fstr("hashed piece %d (passed=%t)", piece, passed).SetLevel(log.Debug))
	p := t.piece(piece)

	t.cl.event.Broadcast()
	if t.closed.IsSet() {
		return
	}

	// Don't score the first time a piece is hashed; it could be an initial check.
	if p.storageCompletionOk {
		if passed {
			pieceHashedCorrect.Add(1)
		} else {
			log.Fmsg(
				"piece %d failed hash: %d connections contributed", piece, len(p.dirtiers),
			).AddValues(t, p).SetLevel(log.Debug).Log(t.logger)
			pieceHashedNotCorrect.Add(1)
		}
	}

	t.publishPieceChange(piece)

	t.publishPieceChange(piece)

	if passed {
		if len(p.dirtiers) != 0 {
			// Don't increment stats above connection-level for every involved connection.
			t.allStats((*ConnStats).incrementPiecesDirtiedGood)
		}
		for c := range p.dirtiers {
			c._stats.incrementPiecesDirtiedGood()
		}
		t.clearPieceTouchers(piece)

		err := p.Storage().MarkComplete()
		if err != nil {
			t.logger.Printf("%T: error marking piece complete %d: %s", t.storage, piece, err)
		}
	} else {
		if t.closed.IsSet() {
			return
		}
		t.pendAllChunkSpecs(piece)

		if len(p.dirtiers) != 0 && p.allChunksDirty() && hashIoErr == nil {
			// Peers contributed to all the data for this piece hash failure, and the failure was
			// not due to errors in the storage (such as data being dropped in a cache).

			// Increment Torrent and above stats, and then specific connections.
			t.allStats((*ConnStats).incrementPiecesDirtiedBad)
			for c := range p.dirtiers {
				// Y u do dis peer?!
				c.stats().incrementPiecesDirtiedBad()
			}

			bannableTouchers := make([]*Peer, 0, len(p.dirtiers))
			for c := range p.dirtiers {
				if !c.trusted {
					bannableTouchers = append(bannableTouchers, c)
				}
			}
			t.clearPieceTouchers(piece)
			slices.Sort(bannableTouchers, connLessTrusted)

			if t.cl.config.Debug {
				t.logger.Printf(
					"bannable conns by trust for piece %d: %v",
					piece,
					func() (ret []connectionTrust) {
						for _, c := range bannableTouchers {
							ret = append(ret, c.trust())
						}
						return
					}(),
				)
			}

			if len(bannableTouchers) >= 1 {
				c := bannableTouchers[0]
				t.cl.banPeerIP(c.remoteIp())

			}
		}
		t.onIncompletePiece(piece)
		p.Storage().MarkNotComplete()
	}
	t.updatePieceCompletion(piece)
}
func (t *Torrent) cancelRequestsForPiece(piece pieceIndex) {
	// TODO: Make faster
	for cn := range t.conns {

	}
}

func (t *Torrent) onPieceCompleted(piece pieceIndex) {
	t.pendAllChunkSpecs(piece)
	t.cancelRequestsForPiece(piece)
	t.piece(piece).readerCond.Broadcast()
	for conn := range t.conns {
		conn.have(piece)
		t.maybeDropMutuallyCompletePeer(&conn.Peer)
	}
}

// Called when a piece is found to be not complete.
func (t *Torrent) onIncompletePiece(piece pieceIndex) {
	if t.pieceAllDirty(piece) {
		t.pendAllChunkSpecs(piece)
	}
	if !t.wantPieceIndex(piece) {
		// t.logger.Printf("piece %d incomplete and unwanted", piece)
		return
	}
	// We could drop any connections that we told we have a piece that we
	// don't here. But there's a test failure, and it seems clients don't care
	// if you request pieces that you already claim to have. Pruning bad
	// connections might just remove any connections that aren't treating us
	// favourably anyway.

	// for c := range t.conns {
	// 	if c.sentHave(piece) {
	// 		c.drop()
	// 	}
	// }
	t.iterPeers(func(conn *Peer) {
		if conn.peerHasPiece(piece) {
			conn.updateRequests("piece incomplete")
		}
	})
}

func (t *Torrent) tryCreateMorePieceHashers() {
	for !t.closed.IsSet() && t.activePieceHashes < 2 && t.tryCreatePieceHasher() {
	}
}

func (t *Torrent) tryCreatePieceHasher() bool {
	if t.storage == nil {
		return false
	}
	pi, ok := t.getPieceToHash()
	if !ok {
		return false
	}
	p := t.piece(pi)
	t.piecesQueuedForHash.Remove(bitmap.BitIndex(pi))
	p.hashing = true
	t.publishPieceChange(pi)
	t.updatePiecePriority(pi, "Torrent.tryCreatePieceHasher")
	t.storageLock.RLock()
	t.activePieceHashes++
	go t.pieceHasher(pi)
	return true
}

func (t *Torrent) getPieceToHash() (ret pieceIndex, ok bool) {
	t.piecesQueuedForHash.IterTyped(func(i pieceIndex) bool {
		if t.piece(i).hashing {
			return true
		}
		ret = i
		ok = true
		return false
	})
	return
}

func (t *Torrent) pieceHasher(index pieceIndex) {
	p := t.piece(index)
	sum, copyErr := t.hashPiece(index)
	correct := sum == *p.hash
	switch copyErr {
	case nil, io.ErrUnexpectedEOF:
	default:
		log.Fmsg("piece %v (%s) hash failure copy error: %v", p, p.hash.HexString(), copyErr).Log(t.logger)
	}
	t.storageLock.RUnlock()
	t.cl.lock()
	defer t.cl.unlock()
	p.hashing = false
	t.pieceHashed(index, correct, copyErr)
	t.updatePiecePriority(index, "Torrent.pieceHasher")
	t.activePieceHashes--
	t.tryCreateMorePieceHashers()
}

// Clear the entries for the connections that touched a piece.
func (t *Torrent) clearPieceTouchers(pi pieceIndex) {
	p := t.piece(pi)
	for c := range p.dirtiers {
		delete(c.peerTouchedPieces, pi)
		delete(p.dirtiers, c)
	}
}

func (t *Torrent) peersAsSlice() (ret []*Peer) {
	t.iterPeers(func(p *Peer) {
		ret = append(ret, p)
	})
	return
}

func (t *Torrent) queuePieceCheck(pieceIndex pieceIndex) {
	piece := t.piece(pieceIndex)
	if piece.queuedForHash() {
		return
	}
	t.piecesQueuedForHash.Add(bitmap.BitIndex(pieceIndex))
	t.publishPieceChange(pieceIndex)
	t.updatePiecePriority(pieceIndex, "Torrent.queuePieceCheck")
	t.tryCreateMorePieceHashers()
}

// Forces all the pieces to be re-hashed. See also Piece.VerifyData. This should not be called
// before the Info is available.
func (t *Torrent) VerifyData() {
	for i := pieceIndex(0); i < t.NumPieces(); i++ {
		t.Piece(i).VerifyData()
	}
}

// Start the process of connecting to the given peer for the given torrent if appropriate.
func (t *Torrent) initiateConn(peer PeerInfo) {
	if peer.Id == t.cl.peerID {
		return
	}
	if t.cl.badPeerAddr(peer.Addr) && !peer.Trusted {
		return
	}
	addr := peer.Addr
	if t.addrActive(addr.String()) {
		return
	}

	t.halfOpen[addr.String()] = peer
	go t.cl.outgoingConnection(t, addr, peer.Source, peer.Trusted)
}

// Adds a trusted, pending peer for each of the given Client's addresses. Typically used in tests to
// quickly make one Client visible to the Torrent of another Client.
func (t *Torrent) AddClientPeer(cl *Client) int {
	return t.AddPeers(func() (ps []PeerInfo) {
		for _, la := range cl.ListenAddrs() {
			ps = append(ps, PeerInfo{
				Addr:    la,
				Trusted: true,
			})
		}
		return
	}())
}

// All stats that include this Torrent. Useful when we want to increment ConnStats but not for every
// connection.
func (t *Torrent) allStats(f func(*ConnStats)) {
	f(&t.stats)
	f(&t.cl.stats)
}
func (t *Torrent) hashingPiece(i pieceIndex) bool {
	return t.pieces[i].hashing
}

func (t *Torrent) pieceQueuedForHash(i pieceIndex) bool {
	return t.piecesQueuedForHash.Get(bitmap.BitIndex(i))
}

func (t *Torrent) dialTimeout() time.Duration {
	return reducedDialTimeout(t.cl.config.MinDialTimeout, t.cl.config.NominalDialTimeout, t.cl.config.HalfOpenConnsPerTorrent, t.peers.Len())
}

func (t *Torrent) piece(i int) *Piece {
	return &t.pieces[i]
}

func (t *Torrent) onWriteChunkErr(err error) {
	if t.userOnWriteChunkErr != nil {
		go t.userOnWriteChunkErr(err)
		return
	}
	t.logger.WithDefaultLevel(log.Critical).Printf("default chunk write error handler: disabling data download")
	t.disallowDataDownloadLocked()
}

func (t *Torrent) DisallowDataDownload() {
	t.disallowDataDownloadLocked()
}

func (t *Torrent) disallowDataDownloadLocked() {
	t.dataDownloadDisallowed.Set()
}

func (t *Torrent) AllowDataDownload() {
	t.dataDownloadDisallowed.Clear()
}

// Enables uploading data, if it was disabled.
func (t *Torrent) AllowDataUpload() {
	t.cl.lock()
	defer t.cl.unlock()
	t.dataUploadDisallowed = false
	for c := range t.conns {
		c.updateRequests("allow data upload")
	}
}

// Disables uploading data, if it was enabled.
func (t *Torrent) DisallowDataUpload() {
	t.cl.lock()
	defer t.cl.unlock()
	t.dataUploadDisallowed = true
	for c := range t.conns {
		c.updateRequests("disallow data upload")
	}
}

// Sets a handler that is called if there's an error writing a chunk to local storage. By default,
// or if nil, a critical message is logged, and data download is disabled.
func (t *Torrent) SetOnWriteChunkError(f func(error)) {
	t.cl.lock()
	defer t.cl.unlock()
	t.userOnWriteChunkErr = f
}

func (t *Torrent) iterPeers(f func(p *Peer)) {
	for pc := range t.conns {
		f(&pc.Peer)
	}
	for _, ws := range t.webSeeds {
		f(ws)
	}
}

func (t *Torrent) callbacks() *Callbacks {
	return &t.cl.config.Callbacks
}

func (t *Torrent) addWebSeed(url string) {
	if t.cl.config.DisableWebseeds {
		return
	}
	if _, ok := t.webSeeds[url]; ok {
		return
	}
	// I don't think Go http supports pipelining requests. However, we can have more ready to go
	// right away. This value should be some multiple of the number of connections to a host. I
	// would expect that double maxRequests plus a bit would be appropriate. This value is based on
	// downloading Sintel (08ada5a7a6183aae1e09d831df6748d566095a10) from
	// "https://webtorrent.io/torrents/".
	const maxRequests = 16
	ws := webseedPeer{
		peer: Peer{

			reconciledHandshakeStats: true,
			// This should affect how often we have to recompute requests for this peer. Note that
			// because we can request more than 1 thing at a time over HTTP, we will hit the low
			// requests mark more often, so recomputation is probably sooner than with regular peer
			// conns. ~4x maxRequests would be about right.
			PeerMaxRequests: 128,
			RemoteAddr:      remoteAddrFromUrl(url),
			callbacks:       t.callbacks(),
		},
		client: webseed.Client{
			HttpClient: t.cl.webseedHttpClient,

		},
		activeRequests: make(map[Request]webseed.Request, maxRequests),
		maxRequests:    maxRequests,
	}
	ws.peer.initUpdateRequestsTimer()
	ws.requesterCond.L = t.cl.locker()
	for i := 0; i < maxRequests; i += 1 {
		go ws.requester(i)
	}
	for _, f := range t.callbacks().NewPeer {
		f(&ws.peer)
	}
	ws.peer.logger = t.logger.WithContextValue(&ws)
	ws.peer.peerImpl = &ws
	if t.haveInfo() {
		ws.onGotInfo(t.info)
	}
	t.webSeeds[url] = &ws.peer
}

func (t *Torrent) peerIsActive(p *Peer) (active bool) {
	t.iterPeers(func(p1 *Peer) {
		if p1 == p {
			active = true
		}
	})
	return
}

func (t *Torrent) requestIndexToRequest(ri RequestIndex) Request {
	index := ri / t.chunksPerRegularPiece()
	return Request{
		pp.Integer(index),
		t.piece(int(index)).chunkIndexSpec(ri % t.chunksPerRegularPiece()),
	}
}

func (t *Torrent) requestIndexFromRequest(r Request) RequestIndex {
	return t.pieceRequestIndexOffset(pieceIndex(r.Index)) + uint32(r.Begin/t.chunkSize)
}

func (t *Torrent) pieceRequestIndexOffset(piece pieceIndex) RequestIndex {
	return RequestIndex(piece) * t.chunksPerRegularPiece()
}
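// Round-trip sketch (assuming 16 chunks per regular piece): the request for
// piece 2 at chunk 2 has index 2*16 + 2 == 34; requestIndexToRequest(34)
// recovers piece 34/16 == 2 and chunk 34%16 == 2, inverting
// requestIndexFromRequest.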
func (t *Torrent) updateComplete() {
	t.Complete.SetBool(t.haveAllPieces())
}