10 "github.com/anacrolix/log"
11 "github.com/anacrolix/missinggo"
// Reader is the torrent-level read interface handed out to API users.
// NOTE(review): this span is a partial extraction — interface methods
// between the two lines below are missing; confirm the full method set
// against the original file.
14 type Reader interface {
// At minimum it embeds missinggo.ReadContexter (context-aware reads).
18 missinggo.ReadContexter
23 // Piece range by piece index, [begin, end).
// Half-open: end is one past the last wanted piece, so the zero value
// (0, 0) denotes "no pieces". Used as the reader's cached want-range.
24 type pieceRange struct {
28 // Accesses Torrent data via a Client. Reads block until the data is
29 // available. Seeks and readahead also drive Client behaviour.
// NOTE(review): the struct declaration and its field lines are missing
// from this extraction; the comments below document fields of the
// `reader` struct (names such as t, pos, readahead, opMu and pieces are
// inferred from the method bodies further down — confirm against the
// full file).
33 // Adjust the read/seek window to handle Readers locked to File extents
36 // Ensure operations that change the position are exclusive, like Read()
40 // Required when modifying pos and readahead, or reading them without
45 // The cached piece range this reader wants downloaded. The zero value
46 // corresponds to nothing. We cache this so that changes can be detected,
47 // and bubbled up to the Torrent only as required.
// Compile-time assertion that *reader satisfies io.ReadCloser.
51 var _ io.ReadCloser = &reader{}
53 // Don't wait for pieces to complete and be verified. Read calls return as
54 // soon as they can when the underlying chunks become available.
55 func (r *reader) SetResponsive() {
// NOTE(review): the line that sets the responsive flag appears to be
// missing from this extraction (cf. the !r.responsive check in available).
// Wake anything blocked on client state so reads re-evaluate availability.
57 r.t.cl.event.Broadcast()
60 // Disable responsive mode. TODO: Remove?
61 func (r *reader) SetNonResponsive() {
// NOTE(review): the flag-clearing line appears to be missing from this
// extraction. Wake waiters so in-flight reads notice the mode change.
63 r.t.cl.event.Broadcast()
66 // Configure the number of bytes ahead of a read that should also be
67 // prioritized in preparation for further reads.
68 func (r *reader) SetReadahead(readahead int64) {
// Per the field comment above, a lock is required when modifying
// readahead; the locking lines appear to be missing from this extraction.
70 r.readahead = readahead
77 // How many bytes are available to read. Max is the most we could require.
78 func (r *reader) available(off, max int64) (ret int64) {
// Map the absolute byte offset to the chunk request containing it.
81 req, ok := r.t.offsetRequest(off)
// Non-responsive mode only exposes whole, verification-complete pieces.
85 if !r.responsive && !r.t.pieceComplete(pieceIndex(req.Index)) {
// Either mode still requires the specific chunk to be present locally.
88 if !r.t.haveChunk(req) {
// Bytes remaining in the current chunk, measured from off onward.
91 len1 := int64(req.Length) - (off - r.t.requestOffset(req))
96 // Ensure that ret hasn't exceeded our original max.
// Blocks until data at off may be readable. NOTE(review): the actual
// wait call is missing from this extraction — presumably it waits on
// client state under the client lock; confirm against the full file.
103 func (r *reader) waitReadable(off int64) {
104 // We may have been sent back here because we were told we could read but
109 // Calculates the pieces this reader wants downloaded, ignoring the cached
110 // value at r.pieces.
111 func (r *reader) piecesUncached() (ret pieceRange) {
114 // Needs to be at least 1, because [x, x) means we don't want
// ra is the readahead byte span (assignment not shown in this
// extraction); clamp it to what remains of the reader's extent so we
// never want pieces past r.length.
118 if ra > r.length-r.pos {
119 ra = r.length - r.pos
// Convert the absolute byte region [torrentOffset(pos), +ra) into the
// half-open piece-index range it spans.
121 ret.begin, ret.end = r.t.byteRegionPieces(r.torrentOffset(r.pos), ra)
125 func (r *reader) Read(b []byte) (n int, err error) {
126 return r.ReadContext(context.Background(), b)
129 func (r *reader) ReadContext(ctx context.Context, b []byte) (n int, err error) {
130 // This is set under the Client lock if the Context is canceled. I think we coordinate on a
131 // separate variable so as to avoid false negatives with race conditions due to Contexts being
// Only pay for a cancellation watcher when the context can actually be
// cancelled: ctx.Done() returns nil for contexts that are never done.
134 if ctx.Done() != nil {
135 ctx, cancel := context.WithCancel(ctx)
136 // Abort the goroutine when the function returns.
146 // Hmmm, if a Read gets stuck, this means you can't change position for
147 // other purposes. That seems reasonable, but unusual.
// opMu serializes Read with other position-changing operations (Seek).
149 defer r.opMu.Unlock()
150 n, err = r.readOnceAt(b, r.pos, &ctxErr)
// Invariant check — presumably reached when readOnceAt returned n == 0
// with a nil error; TODO confirm (surrounding lines missing here).
153 panic("expected error")
// Translate EOF: reaching r.length is a genuine end-of-stream, while
// EOF from storage before the extent's end is unexpected truncation.
163 if r.pos >= r.length {
165 } else if err == io.EOF {
166 err = io.ErrUnexpectedEOF
171 // Wait until some data should be available to read. Tickles the client if it
172 // isn't. Returns how much should be readable without blocking.
173 func (r *reader) waitAvailable(pos, wanted int64, ctxErr *error, wait bool) (avail int64, err error) {
// Runs under the client lock for its whole duration.
175 defer r.t.cl.unlock()
177 avail = r.available(pos, wanted)
// Terminal conditions that make further waiting pointless:
181 if r.t.closed.IsSet() {
182 err = errors.New("torrent closed")
// Can't make progress if downloading is off and the data isn't local.
189 if r.t.dataDownloadDisallowed || !r.t.networkingEnabled {
190 err = errors.New("downloading disabled and data not already available")
200 // Adds the reader's torrent offset to the reader object offset (for example the reader might be
201 // constrainted to a particular file within the torrent).
202 func (r *reader) torrentOffset(readerPos int64) int64 {
203 return r.offset + readerPos
206 // Performs at most one successful read to torrent storage.
207 func (r *reader) readOnceAt(b []byte, pos int64, ctxErr *error) (n int, err error) {
// Only block waiting for availability while nothing has been read yet.
214 avail, err = r.waitAvailable(pos, int64(len(b)), ctxErr, n == 0)
// Identify the first piece touched by this read, for the error report
// and the completion re-checks below.
218 firstPieceIndex := pieceIndex(r.torrentOffset(pos) / r.t.info.PieceLength)
219 firstPieceOffset := r.torrentOffset(pos) % r.t.info.PieceLength
// Never read past what waitAvailable reported as ready.
220 b1 := missinggo.LimitLen(b, avail)
221 n, err = r.t.readAt(b1, r.torrentOffset(pos))
// Storage read failed: log it, then re-verify piece completion so stale
// completion state is corrected and the data can be re-downloaded.
227 // TODO: Just reset pieces in the readahead window. This might help
228 // prevent thrashing with small caches and file and piece priorities.
229 r.log(log.Fstr("error reading torrent %s piece %d offset %d, %d bytes: %v",
230 r.t.infoHash.HexString(), firstPieceIndex, firstPieceOffset, len(b1), err))
231 if !r.t.updatePieceCompletion(firstPieceIndex) {
232 r.log(log.Fstr("piece %d completion unchanged", firstPieceIndex))
234 // Update the rest of the piece completions in the readahead window, without alerting to
235 // changes (since only the first piece, the one above, could have generated the read error
236 // we're currently handling).
// Sanity check: the cached want-range must start at the failing piece.
237 if r.pieces.begin != firstPieceIndex {
238 panic(fmt.Sprint(r.pieces.begin, firstPieceIndex))
240 for index := r.pieces.begin + 1; index < r.pieces.end; index++ {
241 r.t.updatePieceCompletion(index)
// Close releases the reader under the client lock — presumably it
// deregisters the reader from the Torrent so its piece priorities are
// dropped; NOTE(review): the lock acquisition and the body are missing
// from this extraction, confirm against the full file.
247 func (r *reader) Close() error {
249 defer r.t.cl.unlock()
// posChanged recomputes the wanted piece range after pos/readahead
// changes and notifies the Torrent of the delta.
254 func (r *reader) posChanged() {
255 to := r.piecesUncached()
// `from` is presumably the previously cached range (its assignment is
// missing from this extraction) — confirm against the full file.
261 // log.Printf("reader pos changed %v->%v", from, to)
262 r.t.readerPosChanged(from, to)
// Seek implements io.Seeker over the reader's extent, holding opMu so
// position changes are exclusive with in-progress reads.
265 func (r *reader) Seek(off int64, whence int) (ret int64, err error) {
267 defer r.opMu.Unlock()
// This branch corresponds to io.SeekEnd: off is relative to r.length.
277 r.pos = r.length + off
// Any unrecognized whence value is rejected.
279 err = errors.New("bad whence")
// log emits m via the torrent's logger. m.Skip(1) skips one stack
// frame so the reported call site is r.log's caller, not this helper.
287 func (r *reader) log(m log.Msg) {
288 r.t.logger.Log(m.Skip(1))