func (cl *Client) readRaisePiecePriorities(t *torrent, off, readaheadBytes int64) {
index := int(off / int64(t.usualPieceSize()))
- cl.raisePiecePriority(t, index, piecePriorityNow)
+ // The piece the read occurred in gets the most urgent priority.
+ cl.raisePiecePriority(t, index, PiecePriorityNow)
index++
if index >= t.numPieces() {
return
}
- cl.raisePiecePriority(t, index, piecePriorityNext)
+ // The piece immediately following the read location.
+ cl.raisePiecePriority(t, index, PiecePriorityNext)
for range iter.N(readaheadPieces(readaheadBytes, t.Info.PieceLength)) {
index++
if index >= t.numPieces() {
break
}
- cl.raisePiecePriority(t, index, piecePriorityReadahead)
+ // Remaining pieces in the readahead window may be required soon.
+ cl.raisePiecePriority(t, index, PiecePriorityReadahead)
}
}
defer t.cl.mu.Unlock()
pieceSize := int64(t.usualPieceSize())
for i := off / pieceSize; i*pieceSize < off+len; i++ {
- t.cl.prioritizePiece(t.torrent, int(i), piecePriorityNormal)
+ t.cl.raisePiecePriority(t.torrent, int(i), PiecePriorityNormal)
}
}
t.cl.mu.Lock()
defer t.cl.mu.Unlock()
for i := range iter.N(t.numPieces()) {
- t.cl.raisePiecePriority(t.torrent, i, piecePriorityNormal)
+ t.cl.raisePiecePriority(t.torrent, i, PiecePriorityNormal)
}
// Nice to have the first and last pieces sooner for various interactive
// purposes.
- t.cl.raisePiecePriority(t.torrent, 0, piecePriorityReadahead)
- t.cl.raisePiecePriority(t.torrent, t.numPieces()-1, piecePriorityReadahead)
+ t.cl.raisePiecePriority(t.torrent, 0, PiecePriorityReadahead)
+ t.cl.raisePiecePriority(t.torrent, t.numPieces()-1, PiecePriorityReadahead)
}
// Returns nil metainfo if it isn't in the cache. Checks that the retrieved
correct := t.pieceComplete(piece)
p := t.Pieces[piece]
if correct {
- p.Priority = piecePriorityNone
+ p.Priority = PiecePriorityNone
p.PendingChunkSpecs = nil
for req := range t.urgent {
if int(req.Index) == piece {
// Adjust piece position in the request order for this connection based on the
// given piece priority.
func (cn *connection) pendPiece(piece int, priority piecePriority) {
- if priority == piecePriorityNone {
+ if priority == PiecePriorityNone {
cn.pieceRequestOrder.DeletePiece(piece)
return
}
// [ Normal ]
key := func() int {
switch priority {
- case piecePriorityNow:
+ case PiecePriorityNow:
return -3*len(cn.piecePriorities) + 3*pp
- case piecePriorityNext:
+ case PiecePriorityNext:
return -2*len(cn.piecePriorities) + 2*pp
- case piecePriorityReadahead:
+ case PiecePriorityReadahead:
return -len(cn.piecePriorities) + pp
- case piecePriorityNormal:
+ case PiecePriorityNormal:
return pp
default:
panic(priority)
piecePriorities: []int{1, 4, 0, 3, 2},
}
testRequestOrder(nil, c.pieceRequestOrder, t)
- c.pendPiece(2, piecePriorityNone)
+ c.pendPiece(2, PiecePriorityNone)
testRequestOrder(nil, c.pieceRequestOrder, t)
- c.pendPiece(1, piecePriorityNormal)
- c.pendPiece(2, piecePriorityNormal)
+ c.pendPiece(1, PiecePriorityNormal)
+ c.pendPiece(2, PiecePriorityNormal)
testRequestOrder([]int{2, 1}, c.pieceRequestOrder, t)
- c.pendPiece(0, piecePriorityNormal)
+ c.pendPiece(0, PiecePriorityNormal)
testRequestOrder([]int{2, 0, 1}, c.pieceRequestOrder, t)
- c.pendPiece(1, piecePriorityReadahead)
+ c.pendPiece(1, PiecePriorityReadahead)
testRequestOrder([]int{1, 2, 0}, c.pieceRequestOrder, t)
- c.pendPiece(4, piecePriorityNow)
+ c.pendPiece(4, PiecePriorityNow)
// now(4), r(1), normal(0, 2)
testRequestOrder([]int{4, 1, 2, 0}, c.pieceRequestOrder, t)
- c.pendPiece(2, piecePriorityReadahead)
+ c.pendPiece(2, PiecePriorityReadahead)
// N(4), R(1, 2), N(0)
testRequestOrder([]int{4, 2, 1, 0}, c.pieceRequestOrder, t)
- c.pendPiece(1, piecePriorityNow)
+ c.pendPiece(1, PiecePriorityNow)
// now(4, 1), readahead(2), normal(0)
// in the same order, the keys will be: -15+6, -15+12, -5, 1
// so we test that a very low priority (for this connection), "now"
testRequestOrder([]int{4, 2, 1, 0}, c.pieceRequestOrder, t)
// Note this intentially sets to None a piece that's not in the order.
for i := range iter.N(5) {
- c.pendPiece(i, piecePriorityNone)
+ c.pendPiece(i, PiecePriorityNone)
}
testRequestOrder(nil, c.pieceRequestOrder, t)
}
}
type FilePieceState struct {
- Length int64
- State byte
+ Bytes int64 // Bytes within the piece that are part of this File.
+ // Embed the piece's full state, replacing the old single status byte.
+ PieceState
}
-func (f *File) Progress() (ret []FilePieceState) {
+// Returns the state of pieces in this file.
+func (f *File) State() (ret []FilePieceState) {
pieceSize := int64(f.t.usualPieceSize())
off := f.offset % pieceSize
remaining := f.length
if len1 > remaining {
len1 = remaining
}
- ret = append(ret, FilePieceState{len1, f.t.pieceStatusChar(i)})
+ ret = append(ret, FilePieceState{len1, f.t.pieceState(i)})
off = 0
remaining -= len1
}
type piecePriority byte
const (
- piecePriorityNone piecePriority = iota
- piecePriorityNormal
- piecePriorityReadahead
- piecePriorityNext
- piecePriorityNow
+ // Piece priorities, ordered from least to most urgent.
+ PiecePriorityNone piecePriority = iota // Not wanted.
+ PiecePriorityNormal // Wanted.
+ PiecePriorityReadahead // May be required soon.
+ PiecePriorityNext // Succeeds a piece where a read occurred.
+ PiecePriorityNow // A read occurred in this piece.
)
type piece struct {
--- /dev/null
+package torrent
+
+// The current state of a piece.
+type PieceState struct {
+ // The priority currently assigned to the piece.
+ Priority piecePriority
+ // The piece is available in its entirety.
+ Complete bool
+ // The piece is being hashed, or is queued for hash.
+ Checking bool
+ // Some of the piece has been obtained.
+ Partial bool
+}
+
+// Represents a series of consecutive pieces with the same state.
+type PieceStateRun struct {
+ PieceState
+ Length int // How many consecutive pieces have this state.
+}
}
return
}
+
+// Returns the state of pieces of the torrent. They are grouped into runs of
+// same state. The sum of the state run lengths is the number of pieces
+// in the torrent.
+func (t *Torrent) PieceStateRuns() []PieceStateRun {
+ // Hold the state lock so the snapshot across all pieces is consistent.
+ t.stateMu.Lock()
+ defer t.stateMu.Unlock()
+ return t.torrent.pieceStateRuns()
+}
"sync"
"time"
+ "github.com/anacrolix/missinggo"
"github.com/bradfitz/iter"
"github.com/anacrolix/torrent/bencode"
return ""
}
-func (t *torrent) pieceStatusChar(index int) byte {
+// Returns the current state of piece index. Unlike the old single status
+// char, the state fields are independent: a piece can, for example, be
+// both Complete and Checking at the same time.
+func (t *torrent) pieceState(index int) (ret PieceState) {
p := t.Pieces[index]
- switch {
- case t.pieceComplete(index):
- return 'C'
- case p.QueuedForHash:
- return 'Q'
- case p.Hashing:
- return 'H'
- case !p.EverHashed:
- return '?'
- case t.piecePartiallyDownloaded(index):
- switch p.Priority {
- case piecePriorityNone:
- return 'F' // Forgotten
- default:
- return 'P'
- }
- default:
- switch p.Priority {
- case piecePriorityNone:
- return 'z'
- case piecePriorityNow:
- return '!'
- case piecePriorityReadahead:
- return 'R'
- case piecePriorityNext:
- return 'N'
- default:
- return '.'
- }
+ ret.Priority = p.Priority
+ if t.pieceComplete(index) {
+ ret.Complete = true
}
+ // Checking covers both queued-for-hash and actively hashing.
+ if p.QueuedForHash || p.Hashing {
+ ret.Checking = true
+ }
+ if t.piecePartiallyDownloaded(index) {
+ ret.Partial = true
+ }
+ return
}
func (t *torrent) metadataPieceSize(piece int) int {
}
}
-type PieceStatusCharSequence struct {
- Char byte // The state of this sequence of pieces.
- Count int // How many consecutive pieces have this state.
-}
-
-// Returns the state of pieces of the torrent. They are grouped into runs of
-// same state. The sum of the Counts of the sequences is the number of pieces
-// in the torrent. See the function torrent.pieceStatusChar for the possible
-// states.
-func (t *torrent) PieceStatusCharSequences() []PieceStatusCharSequence {
- t.stateMu.Lock()
- defer t.stateMu.Unlock()
- return t.pieceStatusCharSequences()
+// Groups the torrent's pieces into runs of consecutive pieces with
+// identical state. Locking is left to the caller (the exported
+// Torrent.PieceStateRuns takes stateMu before calling this).
+func (t *torrent) pieceStateRuns() (ret []PieceStateRun) {
+ rle := missinggo.NewRunLengthEncoder(func(el interface{}, count uint64) {
+ ret = append(ret, PieceStateRun{
+ PieceState: el.(PieceState),
+ Length: int(count),
+ })
+ })
+ for index := range t.Pieces {
+ rle.Append(t.pieceState(index), 1)
+ }
+ // Flush emits the final pending run into ret.
+ rle.Flush()
+ return
}
-// Returns the length of sequences of identical piece status chars.
-func (t *torrent) pieceStatusCharSequences() (ret []PieceStatusCharSequence) {
- var (
- char byte
- count int
- )
- writeSequence := func() {
- ret = append(ret, PieceStatusCharSequence{char, count})
- }
- if len(t.Pieces) != 0 {
- char = t.pieceStatusChar(0)
- }
- for index := range t.Pieces {
- char1 := t.pieceStatusChar(index)
- if char1 == char {
- count++
- } else {
- writeSequence()
- char = char1
- count = 1
+// Produces a small string representing a PieceStateRun.
+// The run length comes first, then a priority character (if any),
+// then flag characters: H for checking, P for partial, C for complete.
+func pieceStateRunStatusChars(psr PieceStateRun) (ret string) {
+ ret = fmt.Sprintf("%d", psr.Length)
+ ret += func() string {
+ switch psr.Priority {
+ case PiecePriorityNext:
+ return "N"
+ case PiecePriorityNormal:
+ return "."
+ case PiecePriorityReadahead:
+ return "R"
+ case PiecePriorityNow:
+ return "!"
+ default:
+ return ""
}
+ }()
+ if psr.Checking {
+ ret += "H"
+ }
+ if psr.Partial {
+ ret += "P"
+ }
- if count != 0 {
- writeSequence()
+ if psr.Complete {
+ ret += "C"
}
return
}
}
}())
if t.haveInfo() {
- fmt.Fprint(w, "Pieces: ")
- for _, seq := range t.pieceStatusCharSequences() {
- fmt.Fprintf(w, "%d%c ", seq.Count, seq.Char)
+ fmt.Fprint(w, "Pieces:")
+ for _, psr := range t.pieceStateRuns() {
+ w.Write([]byte(" "))
+ w.Write([]byte(pieceStateRunStatusChars(psr)))
}
fmt.Fprintln(w)
}
}
func (t *torrent) piecePartiallyDownloaded(index int) bool {
- return t.pieceNumPendingBytes(index) != t.pieceLength(index)
+ // A piece is partial only when some, but not all, of its bytes are
+ // pending. The old form wrongly classified a finished piece
+ // (0 pending bytes) as partial.
+ pendingBytes := t.pieceNumPendingBytes(index)
+ return pendingBytes != 0 && pendingBytes != t.pieceLength(index)
}
func numChunksForPiece(chunkSize int, pieceSize int) int {
if p.Hashing {
return false
}
- if p.Priority == piecePriorityNone {
+ if p.Priority == PiecePriorityNone {
if !t.urgentChunkInPiece(index) {
return false
}