2 Package torrent implements a torrent client.
9 if err := c.AddTorrent(externalMetaInfoPackageSux); err != nil {
10 return fmt.Errorf("error adding torrent: %s", err)
13 log.Print("erhmahgerd, torrent downloaded")
19 "bitbucket.org/anacrolix/go.torrent/dht"
20 "bitbucket.org/anacrolix/go.torrent/util"
36 "github.com/anacrolix/libtorgo/metainfo"
37 "github.com/nsf/libtorgo/bencode"
39 pp "bitbucket.org/anacrolix/go.torrent/peer_protocol"
40 "bitbucket.org/anacrolix/go.torrent/tracker"
41 _ "bitbucket.org/anacrolix/go.torrent/tracker/udp"
44 // Currently doesn't really queue, but should in the future.
// queuePieceCheck schedules hash verification of a piece, skipping it if a
// check is already queued. Despite the name it does not yet maintain a real
// queue (see the comment above): verification starts immediately.
45 func (cl *Client) queuePieceCheck(t *torrent, pieceIndex pp.Integer) {
46 piece := t.Pieces[pieceIndex]
// Already queued for hashing — nothing to do (early-return body not visible
// in this fragmented view).
47 if piece.QueuedForHash {
50 piece.QueuedForHash = true
// Hashing is CPU-heavy, so it runs in its own goroutine.
51 go cl.verifyPiece(t, pieceIndex)
54 // Queues the torrent data for the given region for download. The beginning of
55 // the region is given highest priority to allow a subsequent read at the same
56 // offset to return data ASAP.
// PrioritizeDataRegion queues the byte region [off, off+len_) of the torrent
// identified by ih for download. The start of the region receives the highest
// priority so a subsequent read at the same offset returns data ASAP.
57 func (me *Client) PrioritizeDataRegion(ih InfoHash, off, len_ int64) error {
62 return errors.New("no such active torrent")
65 return errors.New("missing metadata")
// Worst case one request per chunk; pre-size to avoid growth copies.
67 newPriorities := make([]request, 0, (len_+chunkSize-1)/chunkSize)
69 req, ok := t.offsetRequest(off)
71 return errors.New("bad offset")
73 reqOff := t.requestOffset(req)
74 // Gain the alignment adjustment.
76 // Lose the length of this block.
77 len_ -= int64(req.Length)
// Advance off to the start of the next chunk-aligned request.
78 off = reqOff + int64(req.Length)
// Pieces we already have (or don't want) are skipped.
79 if !t.wantPiece(int(req.Index)) {
82 newPriorities = append(newPriorities, req)
84 if len(newPriorities) == 0 {
// First request goes to the front of the priority list; the rest keep
// their order behind any existing priorities.
87 t.Priorities.PushFront(newPriorities[0])
88 for _, req := range newPriorities[1:] {
89 t.Priorities.PushBack(req)
// Give every connection a chance to pick up the newly prioritized requests.
91 for _, cn := range t.Conns {
92 me.replenishConnRequests(t, cn)
// dataSpec identifies a region of torrent data (fields not visible in this
// fragmented view; see dataReady/DataWaiter usage below).
97 type dataSpec struct {
// NOTE(review): the following fields appear to belong to the Client struct,
// whose declaration line is missing from this view — confirm against the
// full file.
// Listener, when set, accepts incoming peer connections (see Start).
106 Listener net.Listener
// Strategy used to choose which requests to issue to peers.
108 DownloadStrategy DownloadStrategy
// Active torrents keyed by infohash.
116 torrents map[InfoHash]*torrent
// Closed (and replaced) whenever new data becomes available; see DataWaiter.
117 dataWaiter chan struct{}
// WriteStatus writes a human-readable summary of client state (half-open
// connection count, DHT node count, per-torrent completion) to w.
120 func (cl *Client) WriteStatus(w io.Writer) {
123 fmt.Fprintf(w, "Half open: %d\n", cl.halfOpen)
124 fmt.Fprintf(w, "DHT nodes: %d\n", cl.DHT.NumNodes())
126 for _, t := range cl.torrents {
// Completion percentage; the closure presumably guards against unknown
// lengths on lines not visible here — confirm in the full file.
127 fmt.Fprintf(w, "%s: %f%%\n", t.Name(), func() float32 {
131 return 100 * (1 - float32(t.BytesLeft())/float32(t.Length()))
139 // Read torrent data at the given offset. Returns ErrDataNotReady if the data
// isn't available yet.
141 func (cl *Client) TorrentReadAt(ih InfoHash, off int64, p []byte) (n int, err error) {
146 err = errors.New("unknown torrent")
// Which piece the read starts in.
149 index := pp.Integer(off / int64(t.UsualPieceSize()))
150 // Reading outside the bounds of a file is an error.
155 if int(index) >= len(t.Pieces) {
// Remember the last piece read from, presumably to bias piece selection —
// confirm against the download strategy.
159 t.lastReadPiece = int(index)
160 piece := t.Pieces[index]
// Offset of the read within the piece. PieceLength(0) is the usual
// (non-final) piece length.
161 pieceOff := pp.Integer(off % int64(t.PieceLength(0)))
// Maximum number of bytes available in this piece from pieceOff.
162 high := int(t.PieceLength(index) - pieceOff)
// Scan pending (not yet downloaded) chunks to see whether they overlap the
// requested region.
166 for cs, _ := range piece.PendingChunkSpecs {
167 chunkOff := int64(pieceOff) - int64(cs.Begin)
168 if chunkOff >= int64(t.PieceLength(index)) {
171 if 0 <= chunkOff && chunkOff < int64(cs.Length) {
172 // read begins in a pending chunk
173 err = ErrDataNotReady
176 // pending chunk caps available data
177 if chunkOff < 0 && int64(len(p)) > -chunkOff {
// All requested bytes are present; read straight from storage.
181 return t.Data.ReadAt(p, off)
184 // Starts the client. Defaults are applied. The client will begin accepting
185 // connections and tracking.
186 func (c *Client) Start() {
188 c.torrents = make(map[InfoHash]*torrent)
189 if c.HalfOpenLimit == 0 {
// Peer ID is the BEP 20 client prefix followed by random bytes.
192 o := copy(c.PeerId[:], BEP20)
193 _, err := rand.Read(c.PeerId[o:])
// crypto/rand failing here means the host is badly broken; treat as fatal.
195 panic("error generating peer id")
197 c.quit = make(chan struct{})
198 if c.DownloadStrategy == nil {
199 c.DownloadStrategy = &DefaultDownloadStrategy{}
// Only accept inbound peers when a listener was configured.
201 if c.Listener != nil {
202 go c.acceptConnections()
// stopped reports whether Stop has been called (body not visible in this
// view; presumably checks the quit channel — confirm in the full file).
206 func (cl *Client) stopped() bool {
215 // Stops the client. All connections to peers are closed and all activity will
// come to a halt.
217 func (me *Client) Stop() {
221 for _, t := range me.torrents {
// Close every peer connection of every torrent.
222 for _, c := range t.Conns {
// acceptConnections loops accepting inbound peer connections from the
// configured Listener and hands each one to runConnection.
229 func (cl *Client) acceptConnections() {
231 conn, err := cl.Listener.Accept()
244 // log.Printf("accepted connection from %s", conn.RemoteAddr())
// Inbound connections have no associated torrent until the handshake
// reveals the infohash, hence the nil torrent argument.
246 if err := cl.runConnection(conn, nil, peerSourceIncoming); err != nil {
// torrent returns the active torrent with the given infohash, or nil if none
// matches (nil-return path not visible in this view).
253 func (me *Client) torrent(ih InfoHash) *torrent {
254 for _, t := range me.torrents {
255 if t.InfoHash == ih {
// initiateConn dials out to a peer for the given torrent and runs the
// connection to completion. Expected dial failures (timeouts, refused,
// unreachable) are tolerated silently.
262 func (me *Client) initiateConn(peer Peer, torrent *torrent) {
// Never connect to ourselves.
263 if peer.Id == me.PeerId {
268 addr := &net.TCPAddr{
272 conn, err := net.DialTimeout(addr.Network(), addr.String(), dialTimeout)
// NOTE(review): halfOpen is presumably decremented near here under the
// client lock; the guard suggests an accounting invariant — confirm.
277 if me.halfOpen == 0 {
284 if netOpErr, ok := err.(*net.OpError); ok {
// Timeouts and common connect errors are unremarkable for BitTorrent.
285 if netOpErr.Timeout() {
288 switch netOpErr.Err {
289 case syscall.ECONNREFUSED, syscall.EHOSTUNREACH:
// Anything else is unexpected and worth logging.
294 log.Printf("error connecting to peer: %s %#v", err, err)
297 // log.Printf("connected to %s", conn.RemoteAddr())
298 err = me.runConnection(conn, torrent, peer.Source)
// incomingPeerPort returns the TCP port the client listens on for incoming
// peers, or 0 when there is no listener.
305 func (cl *Client) incomingPeerPort() int {
306 if cl.Listener == nil {
309 _, p, err := net.SplitHostPort(cl.Listener.Addr().String())
// Parse the port string back into an integer.
314 _, err = fmt.Sscanf(p, "%d", &i)
// runConnection performs the BitTorrent handshake on sock and then services
// the connection until it errors or is dropped. torrent may be nil for
// incoming connections: the peer's infohash, read during the handshake,
// selects the torrent. discovery records how we learned of this peer.
321 func (me *Client) runConnection(sock net.Conn, torrent *torrent, discovery peerSource) (err error) {
323 Discovery: discovery,
327 write: make(chan []byte),
328 post: make(chan pp.Message),
329 PeerMaxRequests: 250, // Default in libtorrent is 250.
332 // There's a lock and deferred unlock later in this function. The
333 // client will not be locked when this deferred is invoked.
339 // go conn.writeOptimizer()
// Send our half of the handshake: protocol string, reserved/extension
// bits (0x10 in byte 5 advertises BEP 10 extension support), then — when
// we initiated and already know the torrent — infohash and peer ID.
340 conn.write <- pp.Bytes(pp.Protocol)
341 conn.write <- pp.Bytes("\x00\x00\x00\x00\x00\x10\x00\x00")
343 conn.write <- pp.Bytes(torrent.InfoHash[:])
344 conn.write <- pp.Bytes(me.PeerId[:])
// Read the peer's protocol string and extension bytes.
347 _, err = io.ReadFull(conn.Socket, b[:])
352 err = fmt.Errorf("when reading protocol and extensions: %s", err)
355 if string(b[:20]) != pp.Protocol {
356 // err = fmt.Errorf("wrong protocol: %#v", string(b[:20]))
359 if 8 != copy(conn.PeerExtensions[:], b[20:]) {
362 // log.Printf("peer extensions: %#v", string(conn.PeerExtensions[:]))
363 var infoHash [20]byte
364 _, err = io.ReadFull(conn.Socket, infoHash[:])
366 return fmt.Errorf("reading peer info hash: %s", err)
368 _, err = io.ReadFull(conn.Socket, conn.PeerId[:])
370 return fmt.Errorf("reading peer id: %s", err)
// Incoming connection: resolve the torrent from the infohash the peer
// sent, then reply with our half of the handshake.
373 torrent = me.torrent(infoHash)
377 conn.write <- pp.Bytes(torrent.InfoHash[:])
378 conn.write <- pp.Bytes(me.PeerId[:])
382 if !me.addConnection(torrent, conn) {
385 go conn.writeOptimizer(time.Minute)
// Peer supports the BEP 10 extension protocol: send our extended
// handshake dictionary.
386 if conn.PeerExtensions[5]&0x10 != 0 {
387 conn.Post(pp.Message{
389 ExtendedID: pp.HandshakeExtendedID,
390 ExtendedPayload: func() []byte {
391 d := map[string]interface{}{
396 "v": "go.torrent dev",
// Advertise metadata size (BEP 9) only once it's known.
398 if torrent.metadataSizeKnown() {
399 d["metadata_size"] = torrent.metadataSize()
// Advertise our listen port so the peer can connect back.
401 if p := me.incomingPeerPort(); p != 0 {
404 b, err := bencode.Marshal(d)
// Tell the peer which pieces we already have.
412 if torrent.haveAnyPieces() {
413 conn.Post(pp.Message{
415 Bitfield: torrent.bitfield(),
// Handshake complete; process messages until the connection dies.
418 err = me.connectionLoop(torrent, conn)
420 err = fmt.Errorf("during Connection loop: %s", err)
422 me.dropConnection(torrent, conn)
// peerGotPiece records that the peer on connection c has the given piece,
// growing the PeerPieces slice as needed, and replenishes requests if we
// still want that piece.
426 func (me *Client) peerGotPiece(t *torrent, c *connection, piece int) {
// Extend the bitfield with false entries until the index is in range.
427 for piece >= len(c.PeerPieces) {
428 c.PeerPieces = append(c.PeerPieces, false)
430 c.PeerPieces[piece] = true
431 if t.wantPiece(piece) {
432 me.replenishConnRequests(t, c)
// peerUnchoked reacts to the peer unchoking us by issuing fresh requests.
436 func (me *Client) peerUnchoked(torrent *torrent, conn *connection) {
437 me.replenishConnRequests(torrent, conn)
// connCancel cancels an outstanding request r on connection cn, reporting
// whether a cancel was actually issued (the cancel path itself is not
// visible in this view). The download strategy is told to forget the request.
440 func (cl *Client) connCancel(t *torrent, cn *connection, r request) (ok bool) {
443 cl.DownloadStrategy.DeleteRequest(t, r)
// connDeleteRequest removes a no-longer-pending request from the connection
// and from the download strategy's bookkeeping. No-op if the request isn't
// pending on cn.
448 func (cl *Client) connDeleteRequest(t *torrent, cn *connection, r request) {
449 if !cn.RequestPending(r) {
452 cl.DownloadStrategy.DeleteRequest(t, r)
453 delete(cn.Requests, r)
// requestPendingMetadata asks peer c for the metadata (BEP 9) pieces we
// don't yet have, in random order to spread load across peers.
456 func (cl *Client) requestPendingMetadata(t *torrent, c *connection) {
// Collect the indices of missing metadata pieces.
461 for index := 0; index < t.MetadataPieceCount(); index++ {
462 if !t.HaveMetadataPiece(index) {
463 pending = append(pending, index)
// Request them in a random permutation.
466 for _, i := range mathRand.Perm(len(pending)) {
469 ExtendedID: byte(c.PeerExtensionIDs["ut_metadata"]),
470 ExtendedPayload: func() []byte {
471 b, err := bencode.Marshal(map[string]int{
// completedMetadata validates metadata assembled from peers (BEP 9): the
// SHA-1 of the metadata must equal the torrent's infohash. On any failure the
// partial metadata is discarded so it can be re-fetched.
484 func (cl *Client) completedMetadata(t *torrent) {
488 copy(ih[:], h.Sum(nil)[:])
// Hash mismatch: a peer fed us bad data. Start over.
489 if ih != t.InfoHash {
490 log.Print("bad metadata")
491 t.InvalidateMetadata()
494 var info metainfo.Info
495 err := bencode.Unmarshal(t.MetaData, &info)
497 log.Printf("error unmarshalling metadata: %s", err)
498 t.InvalidateMetadata()
501 // TODO(anacrolix): If this fails, I think something harsher should be
// done (comment continues on lines not visible here).
503 err = cl.setMetaData(t, info, t.MetaData)
505 log.Printf("error setting metadata: %s", err)
506 t.InvalidateMetadata()
509 log.Printf("%s: got metadata from peers", t)
// gotMetadataExtensionMsg handles a ut_metadata (BEP 9) extension message
// from peer c: data pieces are stored (completing metadata when the last
// arrives), requests are answered or rejected, rejects are ignored.
512 func (cl *Client) gotMetadataExtensionMsg(payload []byte, t *torrent, c *connection) (err error) {
514 err = bencode.Unmarshal(payload, &d)
516 err = fmt.Errorf("error unmarshalling payload: %s: %q", err, payload)
519 msgType, ok := d["msg_type"]
521 err = errors.New("missing msg_type field")
526 case pp.DataMetadataExtensionMsgType:
// The piece data is the trailing bytes of the payload after the bencoded
// header dictionary.
530 t.SaveMetadataPiece(piece, payload[len(payload)-metadataPieceSize(d["total_size"], piece):])
531 if !t.HaveAllMetadataPieces() {
534 cl.completedMetadata(t)
535 case pp.RequestMetadataExtensionMsgType:
// We can't serve what we don't have: reject.
536 if !t.HaveMetadataPiece(piece) {
537 c.Post(t.NewMetadataExtensionMessage(c, pp.RejectMetadataExtensionMsgType, d["piece"], nil))
// Metadata pieces are fixed 16 KiB (1<<14) blocks except the last.
540 c.Post(t.NewMetadataExtensionMessage(c, pp.DataMetadataExtensionMsgType, piece, t.MetaData[(1<<14)*piece:(1<<14)*piece+t.metadataPieceSize(piece)]))
541 case pp.RejectMetadataExtensionMsgType:
543 err = errors.New("unknown msg_type value")
// peerExchangeMessage is the bencoded payload of a PEX (peer exchange)
// extension message.
548 type peerExchangeMessage struct {
549 Added util.CompactPeers `bencode:"added"`
550 AddedFlags []byte `bencode:"added.f"`
551 Dropped []tracker.Peer `bencode:"dropped"`
// connectionLoop decodes and dispatches peer-protocol messages on c until
// the connection errors, EOFs, or the client stops. Returns nil on a clean
// shutdown/EOF, otherwise the first fatal error.
554 func (me *Client) connectionLoop(t *torrent, c *connection) error {
555 decoder := pp.Decoder{
556 R: bufio.NewReader(c.Socket),
// Cap message size to protect against malicious/buggy peers.
557 MaxLength: 256 * 1024,
562 err := decoder.Decode(&msg)
// EOF or a stopping client ends the loop without an error.
568 if me.stopped() || err == io.EOF {
// Choke: the peer won't satisfy requests, so drop our pending ones.
579 for r := range c.Requests {
580 me.connDeleteRequest(t, c, r)
584 me.peerUnchoked(t, c)
586 c.PeerInterested = true
587 // TODO: This should be done from a dedicated unchoking routine.
589 case pp.NotInterested:
590 c.PeerInterested = false
// Have message: the peer announced a new piece.
593 me.peerGotPiece(t, c, int(msg.Index))
595 if c.PeerRequests == nil {
596 c.PeerRequests = make(map[request]struct{}, maxRequests)
598 request := newRequest(msg.Index, msg.Begin, msg.Length)
599 // TODO: Requests should be satisfied from a dedicated upload routine.
600 // c.PeerRequests[request] = struct{}{}
// Serve the request inline: read the chunk from storage.
601 p := make([]byte, msg.Length)
602 n, err := t.Data.ReadAt(p, int64(t.PieceLength(0))*int64(msg.Index)+int64(msg.Begin))
604 return fmt.Errorf("reading t data to serve request %q: %s", request, err)
// Short read means the peer asked for data outside what we hold.
606 if n != int(msg.Length) {
607 return fmt.Errorf("bad request: %v", msg)
616 req := newRequest(msg.Index, msg.Begin, msg.Length)
617 if !c.PeerCancel(req) {
618 log.Printf("received unexpected cancel: %v", req)
// Bitfield is only valid as the first message after the handshake.
621 if c.PeerPieces != nil {
622 err = errors.New("received unexpected bitfield")
626 if len(msg.Bitfield) < t.NumPieces() {
627 err = errors.New("received invalid bitfield")
// Trim spare bits beyond the actual piece count.
630 msg.Bitfield = msg.Bitfield[:t.NumPieces()]
632 c.PeerPieces = msg.Bitfield
633 for index, has := range c.PeerPieces {
635 me.peerGotPiece(t, c, index)
// Piece message: incoming chunk data.
639 err = me.downloadedChunk(t, c, &msg)
641 switch msg.ExtendedID {
642 case pp.HandshakeExtendedID:
643 // TODO: Create a bencode struct for this.
644 var d map[string]interface{}
645 err = bencode.Unmarshal(msg.ExtendedPayload, &d)
647 err = fmt.Errorf("error decoding extended message payload: %s", err)
// "reqq": the peer's maximum outstanding request count (BEP 10).
650 if reqq, ok := d["reqq"]; ok {
651 if i, ok := reqq.(int64); ok {
652 c.PeerMaxRequests = int(i)
// "v": the peer's client name/version string.
655 if v, ok := d["v"]; ok {
656 c.PeerClientName = v.(string)
660 err = errors.New("handshake missing m item")
663 mTyped, ok := m.(map[string]interface{})
665 err = errors.New("handshake m value is not dict")
// Record the extension IDs the peer assigned to each extension name.
668 if c.PeerExtensionIDs == nil {
669 c.PeerExtensionIDs = make(map[string]int64, len(mTyped))
671 for name, v := range mTyped {
674 log.Printf("bad handshake m item extension ID type: %T", v)
// ID 0 disables an extension (BEP 10); otherwise record it.
678 delete(c.PeerExtensionIDs, name)
680 c.PeerExtensionIDs[name] = id
683 metadata_sizeUntyped, ok := d["metadata_size"]
685 metadata_size, ok := metadata_sizeUntyped.(int64)
687 log.Printf("bad metadata_size type: %T", metadata_sizeUntyped)
689 t.SetMetadataSize(metadata_size)
// If the peer supports ut_metadata, fetch any metadata we're missing.
692 if _, ok := c.PeerExtensionIDs["ut_metadata"]; ok {
693 me.requestPendingMetadata(t, c)
696 err = me.gotMetadataExtensionMsg(msg.ExtendedPayload, t, c)
698 err = fmt.Errorf("error handling metadata extension message: %s", err)
// PEX message: harvest new peers from the "added" list.
701 var pexMsg peerExchangeMessage
702 err := bencode.Unmarshal(msg.ExtendedPayload, &pexMsg)
704 err = fmt.Errorf("error unmarshalling PEX message: %s", err)
708 err := me.AddPeers(t.InfoHash, func() (ret []Peer) {
709 for _, cp := range pexMsg.Added {
713 Source: peerSourcePEX,
// Compact peers carry 4-byte IPv4 addresses.
715 if n := copy(p.IP, cp.IP[:]); n != 4 {
723 log.Printf("error adding PEX peers: %s", err)
726 log.Printf("added %d peers from PEX", len(pexMsg.Added))
729 err = fmt.Errorf("unexpected extended message ID: %v", msg.ExtendedID)
732 err = fmt.Errorf("received unknown message type: %#v", msg.Type)
// dropConnection removes conn from torrent's connection list, first deleting
// its outstanding requests. Panics if the connection isn't present — callers
// must only drop connections they previously added.
740 func (me *Client) dropConnection(torrent *torrent, conn *connection) {
742 for r := range conn.Requests {
743 me.connDeleteRequest(torrent, conn, r)
745 for i0, c := range torrent.Conns {
// Remove by swapping with the last element and truncating — O(1),
// doesn't preserve order.
749 i1 := len(torrent.Conns) - 1
751 torrent.Conns[i0] = torrent.Conns[i1]
753 torrent.Conns = torrent.Conns[:i1]
756 panic("connection not found")
// addConnection registers c with torrent t, reporting false when a
// connection with the same peer ID already exists (duplicate peer).
759 func (me *Client) addConnection(t *torrent, c *connection) bool {
763 for _, c0 := range t.Conns {
764 if c.PeerId == c0.PeerId {
765 // Already connected to a client with that ID.
769 t.Conns = append(t.Conns, c)
// openNewConns dials queued peers for every torrent until the half-open
// connection limit is reached.
773 func (me *Client) openNewConns() {
774 for _, t := range me.torrents {
775 for len(t.Peers) != 0 {
// Stop dialing once we're at the half-open cap.
776 if me.halfOpen >= me.HalfOpenLimit {
// Pop the next queued peer and dial it.
780 t.Peers = t.Peers[1:]
781 me.initiateConn(p, t)
786 // Adds peers to the swarm for the torrent corresponding to infoHash.
787 func (me *Client) AddPeers(infoHash InfoHash, peers []Peer) error {
789 t := me.torrent(infoHash)
791 return errors.New("no such torrent")
793 t.Peers = append(t.Peers, peers...)
// setMetaData attaches the torrent's metadata (info dict and its raw bytes),
// then queues every piece for hash verification and notifies the download
// strategy that the torrent has started.
799 func (cl *Client) setMetaData(t *torrent, md metainfo.Info, bytes []byte) (err error) {
800 err = t.setMetadata(md, cl.DataDir, bytes)
804 // Queue all pieces for hashing. This is done sequentially to avoid
805 // spamming goroutines.
// Mark all pieces queued first so nothing re-queues them concurrently.
806 for _, p := range t.Pieces {
807 p.QueuedForHash = true
810 for i := range t.Pieces {
811 cl.verifyPiece(t, pp.Integer(i))
815 cl.DownloadStrategy.TorrentStarted(t)
819 // Prepare a Torrent without any attachment to a Client. That means we can
820 // initialize fields all fields that don't require the Client without locking
// it.
822 func newTorrent(ih InfoHash, announceList [][]string) (t *torrent, err error) {
826 t.Trackers = make([][]tracker.Client, len(announceList))
827 for tierIndex := range announceList {
// NOTE(review): tier starts as the (empty) slice from t.Trackers and is
// rebuilt by append; it is written back at the end of the iteration
// (line 844), so the local reassignment is intentional — confirm no
// early exit skips that write-back in the lines not visible here.
828 tier := t.Trackers[tierIndex]
829 for _, url := range announceList[tierIndex] {
830 tr, err := tracker.New(url)
835 tier = append(tier, tr)
837 // The trackers within each tier must be shuffled before use.
838 // http://stackoverflow.com/a/12267471/149482
839 // http://www.bittorrent.org/beps/bep_0012.html#order-of-processing
// Fisher–Yates shuffle over the tier.
840 for i := range tier {
841 j := mathRand.Intn(i + 1)
842 tier[i], tier[j] = tier[j], tier[i]
844 t.Trackers[tierIndex] = tier
// AddMagnet adds the torrent described by a magnet URI to the client. The
// metadata will be fetched from peers (ut_metadata) once connections form.
849 func (cl *Client) AddMagnet(uri string) (err error) {
850 m, err := ParseMagnetURI(uri)
// Magnet links carry a single flat tracker list; wrap it as one tier.
854 t, err := newTorrent(m.InfoHash, [][]string{m.Trackers})
858 t.DisplayName = m.DisplayName
861 err = cl.addTorrent(t)
// addTorrent registers t with the client and starts tracker and DHT
// announcing for it. Errors if a torrent with the same infohash exists.
868 func (me *Client) addTorrent(t *torrent) (err error) {
869 if _, ok := me.torrents[t.InfoHash]; ok {
870 err = fmt.Errorf("torrent infohash collision")
873 me.torrents[t.InfoHash] = t
874 if !me.DisableTrackers {
875 go me.announceTorrent(t)
878 go me.announceTorrentDHT(t)
883 // Adds the torrent to the client.
884 func (me *Client) AddTorrent(metaInfo *metainfo.MetaInfo) (err error) {
885 t, err := newTorrent(BytesInfoHash(metaInfo.Info.Hash), metaInfo.AnnounceList)
891 err = me.addTorrent(t)
// Metadata is already available from the .torrent file; set it immediately
// (unlike the magnet path, which fetches it from peers).
895 err = me.setMetaData(t, metaInfo.Info.Info, metaInfo.Info.Bytes)
// listenerAnnouncePort returns the port to report in tracker announces,
// derived from the listener address (TCP or UDP). Returns the zero value
// when the address type is unrecognized.
902 func (cl *Client) listenerAnnouncePort() (port int16) {
908 switch data := addr.(type) {
910 return int16(data.Port)
912 return int16(data.Port)
914 log.Printf("unknown listener addr type: %T", addr)
// announceTorrentDHT repeatedly queries the DHT for peers of t and adds any
// it finds to the swarm, rescraping on a timer.
919 func (cl *Client) announceTorrentDHT(t *torrent) {
921 ps, err := cl.DHT.GetPeers(string(t.InfoHash[:]))
923 log.Printf("error getting peers from dht: %s", err)
926 nextScrape := time.After(1 * time.Minute)
// Drain peer batches from the GetPeers stream until it closes or the
// scrape timer fires (select arms not fully visible here).
932 case cps, ok := <-ps.Values:
936 err = cl.AddPeers(t.InfoHash, func() (ret []Peer) {
937 for _, cp := range cps {
938 ret = append(ret, Peer{
941 Source: peerSourceDHT,
943 // log.Printf("peer from dht: %s", &net.UDPAddr{
945 // Port: int(cp.Port),
951 log.Printf("error adding peers from dht for torrent %q: %s", t, err)
954 // log.Printf("got %d peers from dht for torrent %q", len(cps), t)
// announceTorrent runs the tracker announce loop for t: it walks tracker
// tiers in order, announces to the first responsive tracker, adds returned
// peers, promotes the responsive tracker to the front of its tier (BEP 12),
// then sleeps for the tracker-specified interval before re-announcing.
961 func (cl *Client) announceTorrent(t *torrent) {
962 req := tracker.AnnounceRequest{
963 Event: tracker.Started,
965 Port: cl.listenerAnnouncePort(),
967 InfoHash: t.InfoHash,
// Refresh the bytes-left figure before each announce.
972 req.Left = t.BytesLeft()
974 for _, tier := range t.Trackers {
975 for trIndex, tr := range tier {
976 if err := tr.Connect(); err != nil {
980 resp, err := tr.Announce(&req)
986 for _, peer := range resp.Peers {
987 peers = append(peers, Peer{
992 err = cl.AddPeers(t.InfoHash, peers)
996 log.Printf("%s: %d new peers from %s", t, len(peers), tr)
// Move the working tracker to the front of its tier per BEP 12.
998 tier[0], tier[trIndex] = tier[trIndex], tier[0]
999 time.Sleep(time.Second * time.Duration(resp.Interval))
// Subsequent announces are regular (no event).
1000 req.Event = tracker.None
1001 continue newAnnounce
// All trackers failed; back off briefly before retrying.
1004 time.Sleep(5 * time.Second)
// allTorrentsCompleted reports whether every active torrent has all its
// pieces.
1008 func (cl *Client) allTorrentsCompleted() bool {
1009 for _, t := range cl.torrents {
1010 if !t.haveAllPieces() {
1017 // Returns true when all torrents are completely downloaded and false if the
1018 // client is stopped before that.
1019 func (me *Client) WaitAll() bool {
// Waits on the client condition variable (broadcast in pieceHashed) while
// holding the client lock.
1021 defer me.mu.Unlock()
1022 for !me.allTorrentsCompleted() {
// assertRequestHeat is a debugging invariant check: the per-request "heat"
// tracked by DefaultDownloadStrategy must equal the number of connections
// with that request outstanding. Panics on mismatch. Only applies when that
// strategy is in use.
1031 func (cl *Client) assertRequestHeat() {
1032 dds, ok := cl.DownloadStrategy.(*DefaultDownloadStrategy)
1036 for _, t := range cl.torrents {
// Count outstanding requests per request key across all connections.
1037 m := make(map[request]int, 3000)
1038 for _, cn := range t.Conns {
1039 for r := range cn.Requests {
1043 for r, h := range dds.heat[t] {
1045 panic(fmt.Sprintln(m[r], h))
// replenishConnRequests asks the download strategy to top up the requests
// outstanding on c, and clears our interested flag when nothing was queued
// and the peer isn't choking us (nothing left we want from them).
1051 func (me *Client) replenishConnRequests(t *torrent, c *connection) {
1055 me.DownloadStrategy.FillRequests(t, c)
1056 //me.assertRequestHeat()
1057 if len(c.Requests) == 0 && !c.PeerChoked {
1058 c.SetInterested(false)
// downloadedChunk processes a received Piece message: it records the chunk,
// writes it to storage, cancels duplicate requests on other connections,
// unprioritizes the chunk, queues a hash check when the piece completes, and
// signals data readiness to any waiters.
1062 func (me *Client) downloadedChunk(t *torrent, c *connection, msg *pp.Message) error {
1063 req := newRequest(msg.Index, msg.Begin, pp.Integer(len(msg.Piece)))
1065 // Request has been satisfied.
1066 me.connDeleteRequest(t, c, req)
// Top up this connection's requests again on the way out.
1068 defer me.replenishConnRequests(t, c)
1070 // Do we actually want this chunk?
1071 if _, ok := t.Pieces[req.Index].PendingChunkSpecs[req.chunkSpec]; !ok {
1072 log.Printf("got unnecessary chunk from %v: %q", req, string(c.PeerId[:]))
1076 // Write the chunk out.
1077 err := t.WriteChunk(int(msg.Index), int64(msg.Begin), msg.Piece)
1082 // Record that we have the chunk.
1083 delete(t.Pieces[req.Index].PendingChunkSpecs, req.chunkSpec)
1084 t.PiecesByBytesLeft.ValueChanged(t.Pieces[req.Index].bytesLeftElement)
// Last chunk of the piece: verify the whole piece's hash.
1085 if len(t.Pieces[req.Index].PendingChunkSpecs) == 0 {
1086 me.queuePieceCheck(t, req.Index)
1089 // Unprioritize the chunk.
1090 var next *list.Element
// next is captured before any Remove so iteration survives deletion.
1091 for e := t.Priorities.Front(); e != nil; e = next {
1093 if e.Value.(request) == req {
1094 t.Priorities.Remove(e)
1098 // Cancel pending requests for this chunk.
1100 for _, c := range t.Conns {
1101 if me.connCancel(t, c, req) {
1103 me.replenishConnRequests(t, c)
1107 log.Printf("cancelled concurrent requests for %v", req)
// Wake readers blocked on this region (see DataWaiter/TorrentReadAt).
1110 me.dataReady(dataSpec{t.InfoHash, req})
// dataReady signals that new data (described by ds) is available by closing
// the current dataWaiter channel, releasing everyone blocked on DataWaiter.
1114 func (cl *Client) dataReady(ds dataSpec) {
1115 if cl.dataWaiter != nil {
1116 close(cl.dataWaiter)
1121 // Returns a channel that is closed when new data has become available in the
// client.
1123 func (me *Client) DataWaiter() <-chan struct{} {
// Lazily create the waiter channel under the client lock; dataReady closes
// it on the next data arrival.
1125 defer me.mu.Unlock()
1126 if me.dataWaiter == nil {
1127 me.dataWaiter = make(chan struct{})
1129 return me.dataWaiter
// pieceHashed handles the outcome of a piece hash check. On success the
// piece's pending chunks and priorities are cleared, readers are signalled,
// and Have messages go to all peers. On failure the piece's chunks are
// re-pended so they get downloaded again.
1132 func (me *Client) pieceHashed(t *torrent, piece pp.Integer, correct bool) {
1133 p := t.Pieces[piece]
// Hash matched: the piece is complete, no chunks are pending.
1136 p.PendingChunkSpecs = nil
1137 // log.Printf("%s: got piece %d, (%d/%d)", t, piece, t.NumPiecesCompleted(), t.NumPieces())
// Drop any priorities that referenced this piece (next captured before
// Remove so iteration survives deletion).
1138 var next *list.Element
1139 for e := t.Priorities.Front(); e != nil; e = next {
1141 if e.Value.(request).Index == piece {
1142 t.Priorities.Remove(e)
// Announce the whole piece as ready data to blocked readers.
1145 me.dataReady(dataSpec{
1149 chunkSpec{0, pp.Integer(t.PieceLength(piece))},
// Hash mismatch: mark every chunk pending again so it's re-downloaded.
1153 if len(p.PendingChunkSpecs) == 0 {
1154 t.pendAllChunkSpecs(piece)
1157 for _, conn := range t.Conns {
// Tell each peer we now have this piece.
1159 conn.Post(pp.Message{
1161 Index: pp.Integer(piece),
1163 // TODO: Cancel requests for this piece.
// Peers that have the piece may now satisfy other wants; replenish.
1165 if conn.PeerHasPiece(piece) {
1166 me.replenishConnRequests(t, conn)
// Wake WaitAll and anyone else waiting on client state changes.
1170 me.event.Broadcast()
// verifyPiece hashes a piece's data and reports the result to pieceHashed.
// Runs on its own goroutine when dispatched via queuePieceCheck.
1173 func (cl *Client) verifyPiece(t *torrent, index pp.Integer) {
1175 p := t.Pieces[index]
// Clear the queued flag before hashing so a later queue request isn't lost.
1180 p.QueuedForHash = false
1182 sum := t.HashPiece(index)
// correct == (computed hash equals the expected hash from the metainfo).
1185 cl.pieceHashed(t, index, sum == p.Hash)
1189 func (me *Client) Torrents() (ret []*torrent) {
1191 for _, t := range me.torrents {
1192 ret = append(ret, t)