A lot of code was unintentionally relying on fields and methods exported through the embedded *torrent in Torrent.
}
fmt.Fprint(w, "\n")
if t.haveInfo() {
- fmt.Fprintf(w, "%f%% of %d bytes", 100*(1-float32(t.bytesLeft())/float32(t.Length())), t.Length())
+ fmt.Fprintf(w, "%f%% of %d bytes", 100*(1-float32(t.bytesLeft())/float32(t.length)), t.length)
} else {
w.WriteString("<missing metainfo>")
}
// SetRegionPriority raises every piece overlapping the byte region
// [off, off+len) to normal priority, under the client lock.
func (t Torrent) SetRegionPriority(off, len int64) {
t.cl.mu.Lock()
defer t.cl.mu.Unlock()
- pieceSize := int64(t.usualPieceSize())
+ pieceSize := int64(t.torrent.usualPieceSize())
// Iterate every piece index whose start offset falls before the end of the region.
for i := off / pieceSize; i*pieceSize < off+len; i++ {
t.cl.raisePiecePriority(t.torrent, int(i), PiecePriorityNormal)
}
// DownloadAll raises every piece in the torrent to normal priority, and
// additionally bumps the first and last pieces to readahead priority.
func (t Torrent) DownloadAll() {
t.cl.mu.Lock()
defer t.cl.mu.Unlock()
- for i := range iter.N(t.numPieces()) {
+ for i := range iter.N(t.torrent.numPieces()) {
t.cl.raisePiecePriority(t.torrent, i, PiecePriorityNormal)
}
// Nice to have the first and last pieces sooner for various interactive
// purposes.
t.cl.raisePiecePriority(t.torrent, 0, PiecePriorityReadahead)
- t.cl.raisePiecePriority(t.torrent, t.numPieces()-1, PiecePriorityReadahead)
+ t.cl.raisePiecePriority(t.torrent, t.torrent.numPieces()-1, PiecePriorityReadahead)
}
// Returns nil metainfo if it isn't in the cache. Checks that the retrieved
// TODO: The piece state publishing is kinda jammed in here until I have a
// more thorough test.
go func() {
- s := leecherGreeting.pieceStateChanges.Subscribe()
+ s := leecherGreeting.torrent.pieceStateChanges.Subscribe()
defer s.Close()
for i := range s.Values {
log.Print(i)
if new {
t.FailNow()
}
- assert.EqualValues(t, T.Trackers[0][0].URL(), "http://a")
- assert.EqualValues(t, T.Trackers[1][0].URL(), "udp://b")
+ assert.EqualValues(t, T.torrent.Trackers[0][0].URL(), "http://a")
+ assert.EqualValues(t, T.torrent.Trackers[1][0].URL(), "udp://b")
}
type badData struct{}
// Returns the state of pieces in this file.
func (f *File) State() (ret []FilePieceState) {
- pieceSize := int64(f.t.usualPieceSize())
+ pieceSize := int64(f.t.torrent.usualPieceSize())
off := f.offset % pieceSize
remaining := f.length
for i := int(f.offset / pieceSize); ; i++ {
len1 = remaining
}
f.t.cl.mu.RLock()
- ps := f.t.pieceState(i)
+ ps := f.t.torrent.pieceState(i)
f.t.cl.mu.RUnlock()
ret = append(ret, FilePieceState{len1, ps})
off = 0
// defer func() {
// log.Println("readable", ret)
// }()
- if r.t.isClosed() {
+ if r.t.torrent.isClosed() {
return true
}
- req, ok := r.t.offsetRequest(off)
+ req, ok := r.t.torrent.offsetRequest(off)
if !ok {
panic(off)
}
if r.responsive {
- return r.t.haveChunk(req)
+ return r.t.torrent.haveChunk(req)
}
- return r.t.pieceComplete(int(req.Index))
+ return r.t.torrent.pieceComplete(int(req.Index))
}
// How many bytes are available to read. Max is the most we could require.
func (r *Reader) available(off, max int64) (ret int64) {
for max > 0 {
- req, ok := r.t.offsetRequest(off)
+ req, ok := r.t.torrent.offsetRequest(off)
if !ok {
break
}
- if !r.t.haveChunk(req) {
+ if !r.t.torrent.haveChunk(req) {
break
}
- len1 := int64(req.Length) - (off - r.t.requestOffset(req))
+ len1 := int64(req.Length) - (off - r.t.torrent.requestOffset(req))
max -= len1
ret += len1
off += len1
tp.noPendingWrites.Wait()
}
tp.pendingWritesMutex.Unlock()
- n, err = dataReadAt(r.t.data, b1, pos)
+ n, err = dataReadAt(r.t.torrent.data, b1, pos)
if n != 0 {
err = nil
return
}
- if r.t.isClosed() {
+ if r.t.torrent.isClosed() {
if err == nil {
err = errors.New("torrent closed")
}
// The public handle to a live torrent within a Client.
type Torrent struct {
- cl *Client
- *torrent
+ cl *Client
+// Named (not embedded) so the unexported torrent's methods and fields are no
+// longer promoted onto the public Torrent handle; access must be explicit.
+ torrent *torrent
}
// The torrent's infohash. This is fixed and cannot change. It uniquely
// same state. The sum of the state run lengths is the number of pieces
// in the torrent.
func (t Torrent) PieceStateRuns() []PieceStateRun {
// NOTE(review): this guards with the torrent's stateMu rather than cl.mu,
// unlike the other accessors here — confirm that is the intended lock for
// piece-state reads.
- t.stateMu.Lock()
- defer t.stateMu.Unlock()
+ t.torrent.stateMu.Lock()
+ defer t.torrent.stateMu.Unlock()
return t.torrent.pieceStateRuns()
}
// NumPieces returns the number of pieces in the torrent.
// NOTE(review): reads numPieces without taking cl.mu — presumably safe once
// the info is immutable; confirm.
func (t Torrent) NumPieces() int {
- return t.numPieces()
+ return t.torrent.numPieces()
}
// Drop the torrent from the client, and close it.
// BytesCompleted reports how many bytes of the torrent are complete, taking
// the client lock for reading.
func (t Torrent) BytesCompleted() int64 {
t.cl.mu.RLock()
defer t.cl.mu.RUnlock()
- return t.bytesCompleted()
+ return t.torrent.bytesCompleted()
}
// The subscription emits as (int) the index of pieces as their state changes.
defer t.cl.mu.Unlock()
t.torrent.setDisplayName(dn)
}
+
+// Name returns the torrent's name under the client lock.
+// NOTE(review): takes the write lock (Lock) where read-only accessors such as
+// BytesCompleted use RLock — confirm whether RLock would suffice here.
+func (t Torrent) Name() string {
+ t.cl.mu.Lock()
+ defer t.cl.mu.Unlock()
+ return t.torrent.Name()
+}
+
+// Length returns the total byte length of the torrent, or -1 if the metainfo
+// has not been received yet. The select is a non-blocking check of the
+// GotInfo channel: once it is closed, torrent.length is valid to read.
+func (t Torrent) Length() int64 {
+ select {
+ case <-t.GotInfo():
+ return t.torrent.length
+ default:
+ return -1
+ }
+}
return
}
-func (t *torrent) Length() int64 {
- return t.length
-}
-
func (t *torrent) isClosed() bool {
select {
case <-t.closing:
}
// requestOffset converts a (piece, begin) request into an absolute byte
// offset within the torrent's data.
func (t *torrent) requestOffset(r request) int64 {
- return torrentRequestOffset(t.Length(), int64(t.usualPieceSize()), r)
+ return torrentRequestOffset(t.length, int64(t.usualPieceSize()), r)
}
// Return the request that would include the given offset into the torrent
// data. Returns !ok if there is no such request.
func (t *torrent) offsetRequest(off int64) (req request, ok bool) {
- return torrentOffsetRequest(t.Length(), t.Info.PieceLength, int64(t.chunkSize), off)
+ return torrentOffsetRequest(t.length, t.Info.PieceLength, int64(t.chunkSize), off)
}
func (t *torrent) writeChunk(piece int, begin int64, data []byte) (err error) {
return
}
if int(piece) == t.numPieces()-1 {
- len_ = pp.Integer(t.Length() % t.Info.PieceLength)
+ len_ = pp.Integer(t.length % t.Info.PieceLength)
}
if len_ == 0 {
len_ = pp.Integer(t.Info.PieceLength)