}
fmt.Fprint(w, "\n")
if t.haveInfo() {
- fmt.Fprintf(w, "%f%% of %d bytes", 100*(1-float32(t.BytesLeft())/float32(t.Length())), t.Length())
+ fmt.Fprintf(w, "%f%% of %d bytes", 100*(1-float32(t.bytesLeft())/float32(t.Length())), t.Length())
} else {
w.WriteString("<missing metainfo>")
}
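+// torrentReadAt reads torrent data into p starting at the absolute byte
+// offset off, while holding the client lock.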
func (cl *Client) torrentReadAt(t *torrent, off int64, p []byte) (n int, err error) {
cl.mu.Lock()
defer cl.mu.Unlock()
- index := int(off / int64(t.UsualPieceSize()))
+ index := int(off / int64(t.usualPieceSize()))
// Reading outside the bounds of a file is an error.
if index < 0 {
err = os.ErrInvalid
return
}
piece := t.Pieces[index]
- pieceOff := pp.Integer(off % int64(t.UsualPieceSize()))
+ pieceOff := pp.Integer(off % int64(t.usualPieceSize()))
pieceLeft := int(t.PieceLength(pp.Integer(index)) - pieceOff)
if pieceLeft <= 0 {
err = io.EOF
}
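+// readRaisePiecePriorities raises the priorities of the pieces in the region
+// being read, beginning with the piece at the read offset.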
func (cl *Client) readRaisePiecePriorities(t *torrent, off, _len int64) {
- index := int(off / int64(t.UsualPieceSize()))
+ index := int(off / int64(t.usualPieceSize()))
cl.raisePiecePriority(t, index, piecePriorityNow)
index++
if index >= t.numPieces() {
return
}
var pending []int
- for index := 0; index < t.MetadataPieceCount(); index++ {
- if !t.HaveMetadataPiece(index) {
+ for index := 0; index < t.metadataPieceCount(); index++ {
+ if !t.haveMetadataPiece(index) {
pending = append(pending, index)
}
}
CopyExact(&ih, h.Sum(nil))
if ih != t.InfoHash {
log.Print("bad metadata")
- t.InvalidateMetadata()
+ t.invalidateMetadata()
return
}
var info metainfo.Info
err := bencode.Unmarshal(t.MetaData, &info)
if err != nil {
log.Printf("error unmarshalling metadata: %s", err)
- t.InvalidateMetadata()
+ t.invalidateMetadata()
return
}
// TODO(anacrolix): If this fails, I think something harsher should be done.
err = cl.setMetaData(t, info, t.MetaData)
if err != nil {
log.Printf("error setting metadata: %s", err)
- t.InvalidateMetadata()
+ t.invalidateMetadata()
return
}
log.Printf("%s: got metadata from peers", t)
t.SaveMetadataPiece(piece, payload[begin:])
c.UsefulChunksReceived++
c.lastUsefulChunkReceived = time.Now()
- if !t.HaveAllMetadataPieces() {
+ if !t.haveAllMetadataPieces() {
break
}
cl.completedMetadata(t)
case pp.RequestMetadataExtensionMsgType:
- if !t.HaveMetadataPiece(piece) {
- c.Post(t.NewMetadataExtensionMessage(c, pp.RejectMetadataExtensionMsgType, d["piece"], nil))
+ if !t.haveMetadataPiece(piece) {
+ c.Post(t.newMetadataExtensionMessage(c, pp.RejectMetadataExtensionMsgType, d["piece"], nil))
break
}
start := (1 << 14) * piece
- c.Post(t.NewMetadataExtensionMessage(c, pp.DataMetadataExtensionMsgType, piece, t.MetaData[start:start+t.metadataPieceSize(piece)]))
+ c.Post(t.newMetadataExtensionMessage(c, pp.DataMetadataExtensionMsgType, piece, t.MetaData[start:start+t.metadataPieceSize(piece)]))
case pp.RejectMetadataExtensionMsgType:
default:
err = errors.New("unknown msg_type value")
}
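+// Progress returns the download state of each piece that overlaps this file,
+// in order from the start of the file.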
func (f *File) Progress() (ret []FilePieceState) {
- pieceSize := int64(f.t.UsualPieceSize())
+ pieceSize := int64(f.t.usualPieceSize())
off := f.offset % pieceSize
remaining := f.length
for i := int(f.offset / pieceSize); ; i++ {
func (t Torrent) SetRegionPriority(off, len int64) {
t.cl.mu.Lock()
defer t.cl.mu.Unlock()
- pieceSize := int64(t.UsualPieceSize())
+ pieceSize := int64(t.usualPieceSize())
for i := off / pieceSize; i*pieceSize < off+len; i++ {
t.cl.prioritizePiece(t.torrent, int(i), piecePriorityNormal)
}
return
}
cl.mu.RLock()
- req.Left = t.BytesLeft()
+ req.Left = t.bytesLeft()
trackers := t.Trackers
cl.mu.RUnlock()
if cl.announceTorrentTrackersFastStart(&req, trackers, t) {
newAnnounce:
for cl.waitWantPeers(t) {
cl.mu.RLock()
- req.Left = t.BytesLeft()
+ req.Left = t.bytesLeft()
trackers = t.Trackers
cl.mu.RUnlock()
numTrackersTried := 0
if !t.haveInfo() {
return false
}
- if t.NumPiecesCompleted() != t.numPieces() {
+ if t.numPiecesCompleted() != t.numPieces() {
return false
}
}
c.lastUsefulChunkReceived = time.Now()
// Write the chunk out.
- err := t.WriteChunk(int(msg.Index), int64(msg.Begin), msg.Piece)
+ err := t.writeChunk(int(msg.Index), int64(msg.Begin), msg.Piece)
if err != nil {
return fmt.Errorf("error writing chunk: %s", err)
}
p.Hashing = true
p.QueuedForHash = false
cl.mu.Unlock()
- sum := t.HashPiece(index)
+ sum := t.hashPiece(index)
cl.mu.Lock()
select {
case <-t.closing:
}
}
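+// invalidateMetadata discards all metadata state so that it must be fetched
+// again from peers.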
-func (t *torrent) InvalidateMetadata() {
+func (t *torrent) invalidateMetadata() {
t.MetaData = nil
t.metadataHave = nil
t.Info = nil
t.metadataHave[index] = true
}
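+// metadataPieceCount returns the number of 16 KiB metadata pieces, rounding
+// up so a trailing short piece is counted: 20000 bytes is 2 pieces.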
-func (t *torrent) MetadataPieceCount() int {
+func (t *torrent) metadataPieceCount() int {
return (len(t.MetaData) + (1 << 14) - 1) / (1 << 14)
}
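+// haveMetadataPiece reports whether the metadata piece at the given index
+// has been obtained.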
-func (t *torrent) HaveMetadataPiece(piece int) bool {
+func (t *torrent) haveMetadataPiece(piece int) bool {
if t.haveInfo() {
return (1<<14)*piece < len(t.MetaData)
} else {
return
}
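+// haveAllMetadataPieces reports whether every metadata piece is available;
+// once the info is known this is trivially true.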
-func (t *torrent) HaveAllMetadataPieces() bool {
+func (t *torrent) haveAllMetadataPieces() bool {
if t.haveInfo() {
return true
}
return 'H'
case !p.EverHashed:
return '?'
- case t.PiecePartiallyDownloaded(index):
+ case t.piecePartiallyDownloaded(index):
switch p.Priority {
case piecePriorityNone:
return 'F' // Forgotten
return metadataPieceSize(len(t.MetaData), piece)
}
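+// newMetadataExtensionMessage builds a metadata extension (ut_metadata)
+// message for connection c with the given msg_type, piece index and data.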
-func (t *torrent) NewMetadataExtensionMessage(c *connection, msgType int, piece int, data []byte) pp.Message {
+func (t *torrent) newMetadataExtensionMessage(c *connection, msgType int, piece int, data []byte) pp.Message {
d := map[string]int{
"msg_type": msgType,
"piece": piece,
fmt.Fprintf(w, "Infohash: %x\n", t.InfoHash)
fmt.Fprintf(w, "Piece length: %s\n", func() string {
if t.haveInfo() {
- return fmt.Sprint(t.UsualPieceSize())
+ return fmt.Sprint(t.usualPieceSize())
} else {
return "?"
}
}
// TODO: Include URIs that weren't converted to tracker clients.
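+// announceList reconstructs the announce-list tiers from the torrent's
+// current trackers.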
-func (t *torrent) AnnounceList() (al [][]string) {
+func (t *torrent) announceList() (al [][]string) {
for _, tier := range t.Trackers {
var l []string
for _, tr := range tier {
CreationDate: time.Now().Unix(),
Comment: "dynamic metainfo from client",
CreatedBy: "go.torrent",
- AnnounceList: t.AnnounceList(),
+ AnnounceList: t.announceList(),
}
}
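+// bytesLeft returns the number of bytes still needed, or -1 if the info
+// isn't available yet.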
-func (t *torrent) BytesLeft() (left int64) {
+func (t *torrent) bytesLeft() (left int64) {
if !t.haveInfo() {
return -1
}
return
}
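+// piecePartiallyDownloaded reports whether any of the piece's data has been
+// received, i.e. fewer bytes are pending than the piece's full length.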
-func (t *torrent) PiecePartiallyDownloaded(index int) bool {
+func (t *torrent) piecePartiallyDownloaded(index int) bool {
return t.PieceNumPendingBytes(pp.Integer(index)) != t.PieceLength(pp.Integer(index))
}
return (pieceSize + chunkSize - 1) / chunkSize
}
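+// usualPieceSize returns the piece length used by every piece except
+// possibly the last.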
-func (t *torrent) UsualPieceSize() int {
+func (t *torrent) usualPieceSize() int {
return int(t.Info.PieceLength)
}
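+// lastPieceSize returns the length of the final piece, which may be shorter
+// than usualPieceSize.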
-func (t *torrent) LastPieceSize() int {
+func (t *torrent) lastPieceSize() int {
return int(t.PieceLength(pp.Integer(t.numPieces() - 1)))
}
return len(t.Info.Pieces) / 20
}
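+// numPiecesCompleted counts the pieces currently reported complete.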
-func (t *torrent) NumPiecesCompleted() (num int) {
+func (t *torrent) numPiecesCompleted() (num int) {
for _, p := range t.Pieces {
if p.Complete() {
num++
}
func (t *torrent) requestOffset(r request) int64 {
- return torrentRequestOffset(t.Length(), int64(t.UsualPieceSize()), r)
+ return torrentRequestOffset(t.Length(), int64(t.usualPieceSize()), r)
}
// Return the request that would include the given offset into the torrent data.
return torrentOffsetRequest(t.Length(), t.Info.PieceLength, chunkSize, off)
}
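+// writeChunk writes a received chunk to storage at its absolute offset,
+// piece*pieceLength+begin, within the torrent data.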
-func (t *torrent) WriteChunk(piece int, begin int64, data []byte) (err error) {
+func (t *torrent) writeChunk(piece int, begin int64, data []byte) (err error) {
_, err = t.data.WriteAt(data, int64(piece)*t.Info.PieceLength+begin)
return
}
return
}
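+// hashPiece hashes the piece's data straight from storage and returns the
+// resulting piece sum.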
-func (t *torrent) HashPiece(piece pp.Integer) (ps pieceSum) {
+func (t *torrent) hashPiece(piece pp.Integer) (ps pieceSum) {
hash := pieceHash.New()
t.data.WriteSectionTo(hash, int64(piece)*t.Info.PieceLength, t.Info.PieceLength)
util.CopyExact(ps[:], hash.Sum(nil))
}
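+// extentPieces returns the indices of the pieces overlapping the byte
+// extent [off, off+_len).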
func (t *torrent) extentPieces(off, _len int64) (pieces []int) {
- for i := off / int64(t.UsualPieceSize()); i*int64(t.UsualPieceSize()) < off+_len; i++ {
+ for i := off / int64(t.usualPieceSize()); i*int64(t.usualPieceSize()) < off+_len; i++ {
pieces = append(pieces, int(i))
}
return