16 "github.com/anacrolix/missinggo"
17 "github.com/anacrolix/missinggo/bitmap"
18 "github.com/anacrolix/missinggo/itertools"
19 "github.com/anacrolix/missinggo/perf"
20 "github.com/anacrolix/missinggo/pubsub"
21 "github.com/bradfitz/iter"
23 "github.com/anacrolix/torrent/bencode"
24 "github.com/anacrolix/torrent/metainfo"
25 pp "github.com/anacrolix/torrent/peer_protocol"
26 "github.com/anacrolix/torrent/storage"
func (t *torrent) chunkIndexSpec(chunkIndex, piece int) chunkSpec {
	return chunkIndexSpec(chunkIndex, t.pieceLength(piece), t.chunkSize)
}

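// The map key by which peers are deduplicated in the torrent's peer reserve.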
type peersKey struct {
	IPBytes string
	Port    int
}

// Maintains state of torrent within a Client.
type torrent struct {
	cl *Client

	// Closed when the torrent is dropped from its Client.
	closing chan struct{}

	// Closed when no more network activity is desired. This includes
	// announcing, and communicating with peers.
	ceasingNetworking chan struct{}

	InfoHash metainfo.InfoHash
	pieces   []piece
	// Values are the piece indices that changed.
	pieceStateChanges *pubsub.PubSub
	// The size of chunks to request from peers over the wire.
	chunkSize pp.Integer
	// Total length of the torrent in bytes. Stored because it's not O(1) to
	// get this from the info dict.
	length int64

	storageOpener storage.I
	storage       storage.Torrent

	// The info dict. Nil if we don't have it (yet).
	info *metainfo.InfoEx
	// Active peer connections, running message stream loops.
	conns []*connection
	// Set of addrs to which we're attempting to connect. Connections are
	// half-open until all handshakes are completed.
	halfOpen map[string]struct{}

	// Reserve of peers to connect to. A peer can be both here and in the
	// active connections if we're told about the peer after connecting with
	// them. That encourages us to reconnect to peers that are well known.
	peers map[peersKey]Peer

	// BEP 12 Multitracker Metadata Extension. The tracker.Client instances
	// mirror their respective URLs from the announce-list metainfo key.
	trackers []trackerTier
	// Name used if the info name isn't available.
	displayName string
	// The bencoded bytes of the info dict.
	metadataBytes []byte
	// Each element corresponds to the 16KiB metadata pieces. If true, we have
	// received that piece.
	metadataCompletedChunks []bool

	// Closed when .Info is set.
	gotMetainfo chan struct{}

	readers map[*Reader]struct{}

	pendingPieces   bitmap.Bitmap
	completedPieces bitmap.Bitmap

	connPieceInclinationPool sync.Pool
}

var (
	pieceInclinationsReused = expvar.NewInt("pieceInclinationsReused")
	pieceInclinationsNew    = expvar.NewInt("pieceInclinationsNew")
	pieceInclinationsPut    = expvar.NewInt("pieceInclinationsPut")
)

func (t *torrent) setDisplayName(dn string) {
	t.displayName = dn
}

func (t *torrent) pieceComplete(piece int) bool {
	return t.completedPieces.Get(piece)
}

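// Queries the piece's storage directly, bypassing the completedPieces cache.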
func (t *torrent) pieceCompleteUncached(piece int) bool {
	return t.pieces[piece].Storage().GetIsComplete()
}

func (t *torrent) numConnsUnchoked() (num int) {
	for _, c := range t.conns {
		if !c.PeerChoked {
			num++
		}
	}
	return
}

// There's a connection to that address already.
func (t *torrent) addrActive(addr string) bool {
	if _, ok := t.halfOpen[addr]; ok {
		return true
	}
	for _, c := range t.conns {
		if c.remoteAddr().String() == addr {
			return true
		}
	}
	return false
}

func (t *torrent) worstConns(cl *Client) (wcs *worstConns) {
	wcs = &worstConns{
		c:  make([]*connection, 0, len(t.conns)),
		t:  t,
		cl: cl,
	}
	for _, c := range t.conns {
		if !c.closed.IsSet() {
			wcs.c = append(wcs.c, c)
		}
	}
	return
}

func (t *torrent) ceaseNetworking() {
	select {
	case <-t.ceasingNetworking:
		return
	default:
	}
	close(t.ceasingNetworking)
	for _, c := range t.conns {
		c.Close()
	}
}

func (t *torrent) addPeer(p Peer, cl *Client) {
	if len(t.peers) >= torrentPeersHighWater {
		return
	}
	key := peersKey{string(p.IP), p.Port}
	if _, ok := t.peers[key]; ok {
		return
	}
	t.peers[key] = p
	peersAddedBySource.Add(string(p.Source), 1)
	cl.openNewConns(t)
}

func (t *torrent) invalidateMetadata() {
	t.metadataBytes = nil
	t.metadataCompletedChunks = nil
	t.info = nil
}

func (t *torrent) saveMetadataPiece(index int, data []byte) {
	if t.haveInfo() {
		return
	}
	if index >= len(t.metadataCompletedChunks) {
		log.Printf("%s: ignoring metadata piece %d", t, index)
		return
	}
	copy(t.metadataBytes[(1<<14)*index:], data)
	t.metadataCompletedChunks[index] = true
}

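// The number of 16KiB pieces the metadata (info dict) spans. For example, a
// 45000 byte info dict spans (45000+16383)/16384 = 3 metadata pieces.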
func (t *torrent) metadataPieceCount() int {
	return (len(t.metadataBytes) + (1 << 14) - 1) / (1 << 14)
}

func (t *torrent) haveMetadataPiece(piece int) bool {
	if t.haveInfo() {
		return (1<<14)*piece < len(t.metadataBytes)
	} else {
		return piece < len(t.metadataCompletedChunks) && t.metadataCompletedChunks[piece]
	}
}

func (t *torrent) metadataSizeKnown() bool {
	return t.metadataBytes != nil
}

func (t *torrent) metadataSize() int {
	return len(t.metadataBytes)
}

func infoPieceHashes(info *metainfo.Info) (ret []string) {
	for i := 0; i < len(info.Pieces); i += 20 {
		ret = append(ret, string(info.Pieces[i:i+20]))
	}
	return
}

// Called when metadata for a torrent becomes available.
func (t *torrent) setMetadata(md *metainfo.Info, infoBytes []byte) (err error) {
	err = validateInfo(md)
	if err != nil {
		err = fmt.Errorf("bad info: %s", err)
		return
	}
	t.info = &metainfo.InfoEx{
		Info:  *md,
		Bytes: infoBytes,
	}
	t.storage, err = t.storageOpener.OpenTorrent(t.info)
	if err != nil {
		return
	}
	t.length = 0
	for _, f := range t.info.UpvertedFiles() {
		t.length += f.Length
	}
	t.metadataBytes = infoBytes
	t.metadataCompletedChunks = nil
	hashes := infoPieceHashes(md)
	t.pieces = make([]piece, len(hashes))
	for i, hash := range hashes {
		piece := &t.pieces[i]
		piece.t = t
		piece.index = i
		piece.noPendingWrites.L = &piece.pendingWritesMutex
		missinggo.CopyExact(piece.Hash[:], hash)
	}
	for _, conn := range t.conns {
		if err := conn.setNumPieces(t.numPieces()); err != nil {
			log.Printf("closing connection: %s", err)
			conn.Close()
		}
	}
	for i := range t.pieces {
		t.updatePieceCompletion(i)
		t.pieces[i].QueuedForHash = true
	}
	go func() {
		for i := range t.pieces {
			t.verifyPiece(i)
		}
	}()
	return
}

func (t *torrent) verifyPiece(piece int) {
	t.cl.verifyPiece(t, piece)
}

func (t *torrent) haveAllMetadataPieces() bool {
	if t.haveInfo() {
		return true
	}
	if t.metadataCompletedChunks == nil {
		return false
	}
	for _, have := range t.metadataCompletedChunks {
		if !have {
			return false
		}
	}
	return true
}

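// Sets the metadata size for a torrent whose info dict isn't known yet, as
// advertised by peers via the ut_metadata extension.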
func (t *torrent) setMetadataSize(bytes int64, cl *Client) {
	if t.haveInfo() {
		// We already know the correct metadata size.
		return
	}
	if bytes <= 0 || bytes > 10000000 { // 10MB, pulled from my ass.
		log.Printf("received bad metadata size: %d", bytes)
		return
	}
	if t.metadataBytes != nil && len(t.metadataBytes) == int(bytes) {
		return
	}
	t.metadataBytes = make([]byte, bytes)
	t.metadataCompletedChunks = make([]bool, (bytes+(1<<14)-1)/(1<<14))
	for _, c := range t.conns {
		cl.requestPendingMetadata(t, c)
	}
}

// The current working name for the torrent. Either the name in the info dict,
// or a display name given such as by the dn value in a magnet link, or "".
func (t *torrent) Name() string {
	if t.haveInfo() {
		return t.info.Name
	}
	return t.displayName
}

func (t *torrent) pieceState(index int) (ret PieceState) {
	p := &t.pieces[index]
	ret.Priority = t.piecePriority(index)
	if t.pieceComplete(index) {
		ret.Complete = true
	}
	if p.QueuedForHash || p.Hashing {
		ret.Checking = true
	}
	if !ret.Complete && t.piecePartiallyDownloaded(index) {
		ret.Partial = true
	}
	return
}

func (t *torrent) metadataPieceSize(piece int) int {
	return metadataPieceSize(len(t.metadataBytes), piece)
}

func (t *torrent) newMetadataExtensionMessage(c *connection, msgType int, piece int, data []byte) pp.Message {
	d := map[string]int{
		"msg_type": msgType,
		"piece":    piece,
	}
	if data != nil {
		d["total_size"] = len(t.metadataBytes)
	}
	p, err := bencode.Marshal(d)
	if err != nil {
		panic(err)
	}
	return pp.Message{
		Type:            pp.Extended,
		ExtendedID:      byte(c.PeerExtensionIDs["ut_metadata"]),
		ExtendedPayload: append(p, data...),
	}
}

func (t *torrent) pieceStateRuns() (ret []PieceStateRun) {
	rle := missinggo.NewRunLengthEncoder(func(el interface{}, count uint64) {
		ret = append(ret, PieceStateRun{
			PieceState: el.(PieceState),
			Length:     int(count),
		})
	})
	for index := range t.pieces {
		rle.Append(t.pieceState(index), 1)
	}
	rle.Flush()
	return
}

// Produces a small string representing a PieceStateRun.
func pieceStateRunStatusChars(psr PieceStateRun) (ret string) {
	ret = fmt.Sprintf("%d", psr.Length)
	ret += func() string {
		switch psr.Priority {
		case PiecePriorityNext:
			return "N"
		case PiecePriorityNormal:
			return "."
		case PiecePriorityReadahead:
			return "R"
		case PiecePriorityNow:
			return "!"
		default:
			return ""
		}
	}()
	if psr.Checking {
		ret += "H"
	}
	if psr.Partial {
		ret += "P"
	}
	if psr.Complete {
		ret += "C"
	}
	return
}

func (t *torrent) writeStatus(w io.Writer, cl *Client) {
	fmt.Fprintf(w, "Infohash: %x\n", t.InfoHash)
	fmt.Fprintf(w, "Metadata length: %d\n", t.metadataSize())
	if !t.haveInfo() {
		fmt.Fprintf(w, "Metadata have: ")
		for _, h := range t.metadataCompletedChunks {
			fmt.Fprintf(w, "%c", func() rune {
				if h {
					return 'H'
				} else {
					return '.'
				}
			}())
		}
		fmt.Fprintln(w)
	}
	fmt.Fprintf(w, "Piece length: %s\n", func() string {
		if t.haveInfo() {
			return fmt.Sprint(t.usualPieceSize())
		} else {
			return "?"
		}
	}())
	if t.haveInfo() {
		fmt.Fprintf(w, "Num Pieces: %d\n", t.numPieces())
		fmt.Fprint(w, "Piece States:")
		for _, psr := range t.pieceStateRuns() {
			w.Write([]byte(" "))
			w.Write([]byte(pieceStateRunStatusChars(psr)))
		}
		fmt.Fprintln(w)
	}
	fmt.Fprintf(w, "Reader Pieces:")
	t.forReaderOffsetPieces(func(begin, end int) (again bool) {
		fmt.Fprintf(w, " %d:%d", begin, end)
		return true
	})
	fmt.Fprintln(w)
	fmt.Fprintf(w, "Trackers: ")
	for _, tier := range t.trackers {
		for _, tr := range tier {
			fmt.Fprintf(w, "%q ", tr)
		}
	}
	fmt.Fprintln(w)
	fmt.Fprintf(w, "Pending peers: %d\n", len(t.peers))
	fmt.Fprintf(w, "Half open: %d\n", len(t.halfOpen))
	fmt.Fprintf(w, "Active peers: %d\n", len(t.conns))
	sort.Sort(&worstConns{
		c:  t.conns,
		t:  t,
		cl: cl,
	})
	for i, c := range t.conns {
		fmt.Fprintf(w, "%2d. ", i+1)
		c.WriteStatus(w, t)
	}
}

func (t *torrent) String() string {
	s := t.Name()
	if s == "" {
		s = fmt.Sprintf("%x", t.InfoHash)
	}
	return s
}

func (t *torrent) haveInfo() bool {
	return t.info != nil
}

// TODO: Include URIs that weren't converted to tracker clients.
func (t *torrent) announceList() (al [][]string) {
	missinggo.CastSlice(&al, t.trackers)
	return
}

// Returns a run-time generated MetaInfo that includes the info bytes and
// announce-list as currently known to the client.
func (t *torrent) MetaInfo() *metainfo.MetaInfo {
	if t.metadataBytes == nil {
		panic("info bytes not set")
	}
	return &metainfo.MetaInfo{
		Info:         *t.info,
		CreationDate: time.Now().Unix(),
		Comment:      "dynamic metainfo from client",
		CreatedBy:    "go.torrent",
		AnnounceList: t.announceList(),
	}
}

func (t *torrent) bytesLeft() (left int64) {
	for i := 0; i < t.numPieces(); i++ {
		left += int64(t.pieces[i].bytesLeft())
	}
	return
}

// Bytes left to give in tracker announces.
func (t *torrent) bytesLeftAnnounce() uint64 {
	if t.haveInfo() {
		return uint64(t.bytesLeft())
	} else {
		return math.MaxUint64
	}
}

func (t *torrent) piecePartiallyDownloaded(piece int) bool {
	if t.pieceComplete(piece) {
		return false
	}
	if t.pieceAllDirty(piece) {
		return false
	}
	return t.pieces[piece].hasDirtyChunks()
}

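// The number of whole or partial chunks in a piece of the given size. For
// example, a 256KiB piece with the conventional 16KiB chunk size has 16
// chunks.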
func numChunksForPiece(chunkSize int, pieceSize int) int {
	return (pieceSize + chunkSize - 1) / chunkSize
}

func (t *torrent) usualPieceSize() int {
	return int(t.info.PieceLength)
}

func (t *torrent) lastPieceSize() int {
	return int(t.pieceLength(t.numPieces() - 1))
}

func (t *torrent) numPieces() int {
	return t.info.NumPieces()
}

func (t *torrent) numPiecesCompleted() (num int) {
	return t.completedPieces.Len()
}

// Safe to call with or without client lock.
func (t *torrent) isClosed() bool {
	select {
	case <-t.closing:
		return true
	default:
		return false
	}
}

func (t *torrent) close() (err error) {
	if t.isClosed() {
		return
	}
	t.ceaseNetworking()
	close(t.closing)
	if c, ok := t.storage.(io.Closer); ok {
		c.Close()
	}
	for _, conn := range t.conns {
		conn.Close()
	}
	t.pieceStateChanges.Close()
	return
}

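// The byte offset into the torrent data at which the given request begins.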
func (t *torrent) requestOffset(r request) int64 {
	return torrentRequestOffset(t.length, int64(t.usualPieceSize()), r)
}

// Return the request that would include the given offset into the torrent
// data. Returns !ok if there is no such request.
func (t *torrent) offsetRequest(off int64) (req request, ok bool) {
	return torrentOffsetRequest(t.length, t.info.PieceLength, int64(t.chunkSize), off)
}

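// Writes a received chunk to piece storage, converting a short write into an
// explicit error.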
func (t *torrent) writeChunk(piece int, begin int64, data []byte) (err error) {
	tr := perf.NewTimer()
	n, err := t.pieces[piece].Storage().WriteAt(data, begin)
	if err == nil && n != len(data) {
		err = io.ErrShortWrite
	}
	if err == nil {
		tr.Stop("write chunk")
	}
	return
}

func (t *torrent) bitfield() (bf []bool) {
	bf = make([]bool, t.numPieces())
	t.completedPieces.IterTyped(func(piece int) (again bool) {
		bf[piece] = true
		return true
	})
	return
}

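// Whether a request is sane to send to a peer: in range, chunk-aligned, and
// either a full chunk or the short final chunk of its piece.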
func (t *torrent) validOutgoingRequest(r request) bool {
	if r.Index >= pp.Integer(t.info.NumPieces()) {
		return false
	}
	if r.Begin%t.chunkSize != 0 {
		return false
	}
	if r.Length > t.chunkSize {
		return false
	}
	pieceLength := t.pieceLength(int(r.Index))
	if r.Begin+r.Length > pieceLength {
		return false
	}
	return r.Length == t.chunkSize || r.Begin+r.Length == pieceLength
}

func (t *torrent) pieceChunks(piece int) (css []chunkSpec) {
	css = make([]chunkSpec, 0, (t.pieceLength(piece)+t.chunkSize-1)/t.chunkSize)
	var cs chunkSpec
	for left := t.pieceLength(piece); left != 0; left -= cs.Length {
		cs.Length = left
		if cs.Length > t.chunkSize {
			cs.Length = t.chunkSize
		}
		css = append(css, cs)
		cs.Begin += cs.Length
	}
	return
}

func (t *torrent) pieceNumChunks(piece int) int {
	return int((t.pieceLength(piece) + t.chunkSize - 1) / t.chunkSize)
}

func (t *torrent) pendAllChunkSpecs(pieceIndex int) {
	t.pieces[pieceIndex].DirtyChunks.Clear()
}

type Peer struct {
	Id     [20]byte
	IP     net.IP
	Port   int
	Source peerSource
	// Peer is known to support encryption.
	SupportsEncryption bool
}

func (t *torrent) pieceLength(piece int) (len_ pp.Integer) {
	if piece < 0 || piece >= t.info.NumPieces() {
		return
	}
	if int(piece) == t.numPieces()-1 {
		len_ = pp.Integer(t.length % t.info.PieceLength)
	}
	if len_ == 0 {
		len_ = pp.Integer(t.info.PieceLength)
	}
	return
}

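// Hashes the piece's data after waiting for pending writes to it to complete.
// Short reads other than io.ErrUnexpectedEOF are logged as unexpected for the
// storage implementation.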
func (t *torrent) hashPiece(piece int) (ret pieceSum) {
	hash := pieceHash.New()
	p := &t.pieces[piece]
	p.waitNoPendingWrites()
	ip := t.info.Piece(piece)
	pl := ip.Length()
	n, err := io.Copy(hash, io.NewSectionReader(t.pieces[piece].Storage(), 0, pl))
	if n == pl {
		missinggo.CopyExact(&ret, hash.Sum(nil))
		return
	}
	if err != io.ErrUnexpectedEOF {
		log.Printf("unexpected error hashing piece with %T: %s", t.storage, err)
	}
	return
}

func (t *torrent) haveAllPieces() bool {
	if !t.haveInfo() {
		return false
	}
	return t.completedPieces.Len() == t.numPieces()
}

func (t *torrent) haveAnyPieces() bool {
	for i := range t.pieces {
		if t.pieceComplete(i) {
			return true
		}
	}
	return false
}

func (t *torrent) havePiece(index int) bool {
	return t.haveInfo() && t.pieceComplete(index)
}

func (t *torrent) haveChunk(r request) (ret bool) {
	// defer func() {
	// 	log.Println("have chunk", r, ret)
	// }()
	if !t.haveInfo() {
		return false
	}
	if t.pieceComplete(int(r.Index)) {
		return true
	}
	p := &t.pieces[r.Index]
	return !p.pendingChunk(r.chunkSpec, t.chunkSize)
}

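// The index of the chunk within its piece, given the chunk's byte offset.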
func chunkIndex(cs chunkSpec, chunkSize pp.Integer) int {
	return int(cs.Begin / chunkSize)
}

// TODO: This should probably be called wantPiece.
func (t *torrent) wantChunk(r request) bool {
	if !t.wantPiece(int(r.Index)) {
		return false
	}
	if t.pieces[r.Index].pendingChunk(r.chunkSpec, t.chunkSize) {
		return true
	}
	// TODO: What about pieces that were wanted, but aren't now, and aren't
	// completed either? That used to be done here.
	return false
}

// TODO: This should be called wantPieceIndex.
func (t *torrent) wantPiece(index int) bool {
	if !t.haveInfo() {
		return false
	}
	p := &t.pieces[index]
	if p.QueuedForHash || p.Hashing {
		return false
	}
	if t.pieceComplete(index) {
		return false
	}
	if t.pendingPieces.Contains(index) {
		return true
	}
	return !t.forReaderOffsetPieces(func(begin, end int) bool {
		return index < begin || index >= end
	})
}

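// Calls f for each piece within any reader's window, stopping early if f
// returns false. Returns true if every call to f returned true.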
func (t *torrent) forNeededPieces(f func(piece int) (more bool)) (all bool) {
	return t.forReaderOffsetPieces(func(begin, end int) (more bool) {
		for i := begin; i < end; i++ {
			if !f(i) {
				return false
			}
		}
		return true
	})
}

func (t *torrent) connHasWantedPieces(c *connection) bool {
	return !c.pieceRequestOrder.IsEmpty()
}

func (t *torrent) extentPieces(off, _len int64) (pieces []int) {
	for i := off / int64(t.usualPieceSize()); i*int64(t.usualPieceSize()) < off+_len; i++ {
		pieces = append(pieces, int(i))
	}
	return
}

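// Returns the worst connection that can justifiably be dropped, or nil:
// either one that has mostly sent us unwanted chunks, or, while connection
// slots remain at least half full, the worst performer that has had a minute
// to prove itself.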
func (t *torrent) worstBadConn(cl *Client) *connection {
	wcs := t.worstConns(cl)
	heap.Init(wcs)
	for wcs.Len() != 0 {
		c := heap.Pop(wcs).(*connection)
		if c.UnwantedChunksReceived >= 6 && c.UnwantedChunksReceived > c.UsefulChunksReceived {
			return c
		}
		if wcs.Len() >= (socketsPerTorrent+1)/2 {
			// Give connections 1 minute to prove themselves.
			if time.Since(c.completedHandshake) > time.Minute {
				return c
			}
		}
	}
	return nil
}

type PieceStateChange struct {
	Index int
	PieceState
}

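// Publishes the piece's state to subscribers if it differs from the last
// published state.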
func (t *torrent) publishPieceChange(piece int) {
	cur := t.pieceState(piece)
	p := &t.pieces[piece]
	if cur != p.PublicPieceState {
		p.PublicPieceState = cur
		t.pieceStateChanges.Publish(PieceStateChange{
			piece,
			cur,
		})
	}
}

func (t *torrent) pieceNumPendingChunks(piece int) int {
	if t.pieceComplete(piece) {
		return 0
	}
	return t.pieceNumChunks(piece) - t.pieces[piece].numDirtyChunks()
}

func (t *torrent) pieceAllDirty(piece int) bool {
	return t.pieces[piece].DirtyChunks.Len() == t.pieceNumChunks(piece)
}

func (t *torrent) forUrgentPieces(f func(piece int) (again bool)) (all bool) {
	return t.forReaderOffsetPieces(func(begin, end int) (again bool) {
		if begin < end {
			if !f(begin) {
				return false
			}
		}
		return true
	})
}

func (t *torrent) readersChanged() {
	t.updatePiecePriorities()
}

func (t *torrent) maybeNewConns() {
	// Tickle the accept routine.
	t.cl.event.Broadcast()
	t.openNewConns()
}

func (t *torrent) piecePriorityChanged(piece int) {
	for _, c := range t.conns {
		c.updatePiecePriority(piece)
	}
	t.maybeNewConns()
	t.publishPieceChange(piece)
}

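// Updates the piece's cached priority, returning true if it changed.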
func (t *torrent) updatePiecePriority(piece int) bool {
	p := &t.pieces[piece]
	newPrio := t.piecePriorityUncached(piece)
	if newPrio == p.priority {
		return false
	}
	p.priority = newPrio
	return true
}

// Update all piece priorities in one hit. This function should have the same
// output as updatePiecePriority, but across all pieces.
func (t *torrent) updatePiecePriorities() {
	newPrios := make([]piecePriority, t.numPieces())
	t.pendingPieces.IterTyped(func(piece int) (more bool) {
		newPrios[piece] = PiecePriorityNormal
		return true
	})
	t.forReaderOffsetPieces(func(begin, end int) (next bool) {
		if begin < end {
			newPrios[begin].Raise(PiecePriorityNow)
		}
		for i := begin + 1; i < end; i++ {
			newPrios[i].Raise(PiecePriorityReadahead)
		}
		return true
	})
	t.completedPieces.IterTyped(func(piece int) (more bool) {
		newPrios[piece] = PiecePriorityNone
		return true
	})
	for i, prio := range newPrios {
		if prio != t.pieces[i].priority {
			t.pieces[i].priority = prio
			t.piecePriorityChanged(i)
		}
	}
}

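// The range of piece indices [begin, end) that overlap the byte region
// [off, off+size), clamped to the torrent's bounds.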
func (t *torrent) byteRegionPieces(off, size int64) (begin, end int) {
	if off >= t.length {
		return
	}
	if off < 0 {
		size += off
		off = 0
	}
	if size <= 0 {
		return
	}
	begin = int(off / t.info.PieceLength)
	end = int((off + size + t.info.PieceLength - 1) / t.info.PieceLength)
	if end > t.info.NumPieces() {
		end = t.info.NumPieces()
	}
	return
}

// Returns true if all iterations complete without breaking.
func (t *torrent) forReaderOffsetPieces(f func(begin, end int) (more bool)) (all bool) {
	// There's an opportunity here to build a map of beginning pieces, and a
	// bitmap of the rest. I wonder if it's worth the allocation overhead.
	for r := range t.readers {
		r.mu.Lock()
		pos, readahead := r.pos, r.readahead
		r.mu.Unlock()
		if readahead < 1 {
			readahead = 1
		}
		begin, end := t.byteRegionPieces(pos, readahead)
		if begin >= end {
			continue
		}
		if !f(begin, end) {
			return false
		}
	}
	return true
}

func (t *torrent) piecePriority(piece int) piecePriority {
	if !t.haveInfo() {
		return PiecePriorityNone
	}
	return t.pieces[piece].priority
}

func (t *torrent) piecePriorityUncached(piece int) (ret piecePriority) {
	ret = PiecePriorityNone
	if t.pieceComplete(piece) {
		return
	}
	if t.pendingPieces.Contains(piece) {
		ret = PiecePriorityNormal
	}
	raiseRet := ret.Raise
	t.forReaderOffsetPieces(func(begin, end int) (again bool) {
		if piece == begin {
			raiseRet(PiecePriorityNow)
		}
		if begin <= piece && piece < end {
			raiseRet(PiecePriorityReadahead)
		}
		return true
	})
	return
}

func (t *torrent) pendPiece(piece int) {
	if t.pendingPieces.Contains(piece) {
		return
	}
	if t.havePiece(piece) {
		return
	}
	t.pendingPieces.Add(piece)
	if !t.updatePiecePriority(piece) {
		return
	}
	t.piecePriorityChanged(piece)
}

func (t *torrent) getCompletedPieces() (ret bitmap.Bitmap) {
	return t.completedPieces.Copy()
}

func (t *torrent) unpendPieces(unpend *bitmap.Bitmap) {
	t.pendingPieces.Sub(unpend)
	t.updatePiecePriorities()
}

func (t *torrent) pendPieceRange(begin, end int) {
	for i := begin; i < end; i++ {
		t.pendPiece(i)
	}
}

func (t *torrent) unpendPieceRange(begin, end int) {
	var bm bitmap.Bitmap
	bm.AddRange(begin, end)
	t.unpendPieces(&bm)
}

func (t *torrent) connRequestPiecePendingChunks(c *connection, piece int) (more bool) {
	if !c.PeerHasPiece(piece) {
		return true
	}
	chunkIndices := t.pieces[piece].undirtiedChunkIndices().ToSortedSlice()
	return itertools.ForPerm(len(chunkIndices), func(i int) bool {
		req := request{pp.Integer(piece), t.chunkIndexSpec(chunkIndices[i], piece)}
		return c.Request(req)
	})
}

func (t *torrent) pendRequest(req request) {
	ci := chunkIndex(req.chunkSpec, t.chunkSize)
	t.pieces[req.Index].pendChunkIndex(ci)
}

func (t *torrent) pieceChanged(piece int) {
	t.cl.pieceChanged(t, piece)
}

func (t *torrent) openNewConns() {
	t.cl.openNewConns(t)
}

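// Returns a random piece ordering from the pool, or a fresh permutation if
// the pool is empty. Return it with putPieceInclination when done.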
func (t *torrent) getConnPieceInclination() []int {
	_ret := t.connPieceInclinationPool.Get()
	if _ret == nil {
		pieceInclinationsNew.Add(1)
		return rand.Perm(t.numPieces())
	}
	pieceInclinationsReused.Add(1)
	return _ret.([]int)
}

func (t *torrent) putPieceInclination(pi []int) {
	t.connPieceInclinationPool.Put(pi)
	pieceInclinationsPut.Add(1)
}

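// Refreshes the cached completion state for the piece from storage, invoking
// the piece-changed path if it differs.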
func (t *torrent) updatePieceCompletion(piece int) {
	pcu := t.pieceCompleteUncached(piece)
	changed := t.completedPieces.Get(piece) != pcu
	t.completedPieces.Set(piece, pcu)
	if changed {
		t.pieceChanged(piece)
	}
}

// Non-blocking read. Client lock is not required.
func (t *torrent) readAt(b []byte, off int64) (n int, err error) {
	p := &t.pieces[off/t.info.PieceLength]
	p.waitNoPendingWrites()
	return p.Storage().ReadAt(b, off-p.Info().Offset())
}

func (t *torrent) updateAllPieceCompletions() {
	for i := range iter.N(t.numPieces()) {
		t.updatePieceCompletion(i)
	}
}