}())
leecherTorrent.AddClientPeer(seeder)
reader := leecherTorrent.NewReader()
- defer reader.Close()
+ t.Cleanup(func() { reader.Close() })
reader.SetReadahead(0)
reader.SetResponsive()
b := make([]byte, 2)
return rip.To4() == nil && rip.To16() != nil
}
-func clamp(min, value, max int64) int64 {
- if min > max {
- panic("harumph")
- }
- if value < min {
- value = min
- }
- if value > max {
- value = max
- }
- return value
-}
-
-func max(as ...int64) int64 {
- ret := as[0]
- for _, a := range as[1:] {
- if a > ret {
- ret = a
- }
- }
- return ret
-}
-
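The removed clamp and variadic max helpers are covered by Go 1.21's builtin min and max for ordered types, which readerPosChanged below now calls directly. A minimal sketch of the equivalent bounding expression, assuming int64 operands (clampSketch is an illustrative name, not part of the package):

package torrent

// Sketch only (assumes Go 1.21+): the builtin min and max accept two or more
// operands of any ordered type, so the removed helpers have direct equivalents.
func clampSketch(lo, v, hi int64) int64 {
	// Bound v to [lo, hi]; the removed clamp additionally panicked when lo > hi.
	return min(max(v, lo), hi)
}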
func maxInt(as ...int) int {
ret := as[0]
for _, a := range as[1:] {
)
// Accesses Torrent data via a Client. Reads block until the data is available. Seeks and readahead
-// also drive Client behaviour. Not safe for concurrent use.
+// also drive Client behaviour. Not safe for concurrent use. There are Torrent, File and Piece
+// constructors for this.
type Reader interface {
+ // Read/Seek and not ReadAt because we want to return data as soon as it's available, and
+ // because we want a single read head.
io.ReadSeekCloser
+ // Deprecated: This prevents type asserting for optional interfaces because a wrapper is
+ // required to adapt back to io.Reader.
missinggo.ReadContexter
// Configure the number of bytes ahead of a read that should also be prioritized in preparation
// for further reads. Overridden by non-nil readahead func, see SetReadaheadFunc.
SetReadahead(int64)
// If non-nil, the provided function is called when the implementation needs to know the
// readahead for the current reader. Calls occur during Reads and Seeks, and while the Client is
// locked.
SetReadaheadFunc(ReadaheadFunc)
// Don't wait for pieces to complete and be verified. Read calls return as soon as they can when
- // the underlying chunks become available.
+ // the underlying chunks become available. May be deprecated, although BitTorrent v2 will mean
+ // we can support this without piece hashing.
SetResponsive()
}
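For reference, a minimal sketch of driving this interface from client code, along the lines of the test snippet above. Torrent.NewReader, SetReadahead and SetResponsive are the methods shown here; the streaming helper itself is illustrative:

package torrent_test

import (
	"io"

	"github.com/anacrolix/torrent"
)

// streamTorrent is an illustrative helper: copy a torrent's data to w as it arrives.
func streamTorrent(t *torrent.Torrent, w io.Writer) error {
	r := t.NewReader()
	defer r.Close()
	// Prioritize a window ahead of the read head so sequential reads don't stall.
	r.SetReadahead(1 << 20)
	// Return data as soon as chunks arrive instead of waiting for whole pieces to verify.
	r.SetResponsive()
	_, err := io.Copy(w, r)
	return err
}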
}
// Adds the reader's torrent offset to the reader object offset (for example the reader might be
-// constrainted to a particular file within the torrent).
+// constrained to a particular file within the torrent).
func (r *reader) torrentOffset(readerPos int64) int64 {
return r.offset + readerPos
}
--- /dev/null
+package possumTorrentStorage
+
+import (
+ "cmp"
+)
+
+// Sorts by a precomputed key but swaps on another slice at the same time.
+type keySorter[T any, K cmp.Ordered] struct {
+ orig []T
+ keys []K
+}
+
+func (o keySorter[T, K]) Len() int {
+ return len(o.keys)
+}
+
+func (o keySorter[T, K]) Less(i, j int) bool {
+ return o.keys[i] < o.keys[j]
+}
+
+func (o keySorter[T, K]) Swap(i, j int) {
+ o.keys[i], o.keys[j] = o.keys[j], o.keys[i]
+ o.orig[i], o.orig[j] = o.orig[j], o.orig[i]
+}
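A hedged usage sketch for keySorter with the standard sort package; the names/offsets pairing below is illustrative, not the provider's actual call site:

package possumTorrentStorage

import "sort"

// sortByOffset is illustrative: order object names by precomputed int64 offsets,
// with the keys and names slices swapping in lockstep.
func sortByOffset(names []string, offsets []int64) {
	sort.Sort(keySorter[string, int64]{orig: names, keys: offsets})
}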
package possumTorrentStorage
import (
- "cmp"
"fmt"
"io"
"sort"
var _ storage.ConsecutiveChunkReader = Provider{}
-// Sorts by a precomputed key but swaps on another slice at the same time.
-type keySorter[T any, K cmp.Ordered] struct {
- orig []T
- keys []K
-}
-
-func (o keySorter[T, K]) Len() int {
- return len(o.keys)
-}
-
-func (o keySorter[T, K]) Less(i, j int) bool {
- return o.keys[i] < o.keys[j]
-}
-
-func (o keySorter[T, K]) Swap(i, j int) {
- o.keys[i], o.keys[j] = o.keys[j], o.keys[i]
- o.orig[i], o.orig[j] = o.orig[j], o.orig[i]
-}
-
// TODO: Should the parent ReadConsecutiveChunks method take the expected number of bytes to avoid
// trying to read discontinuous or incomplete sequences of chunks?
func (p Provider) ReadConsecutiveChunks(prefix string) (rc io.ReadCloser, err error) {
return p.PieceImpl.WriteAt(b, off)
}
+// If you're calling this you're probably doing something very inefficient. Consider WriteTo which
+// handles data spread across multiple objects in storage.
func (p Piece) ReadAt(b []byte, off int64) (n int, err error) {
if off < 0 {
err = os.ErrInvalid
_readerReadaheadPieces bitmap.Bitmap
// A cache of pieces we need to get. Calculated from various piece and file priorities and
- // completion states elsewhere. Includes piece data and piece v2 hashes.
+ // completion states elsewhere. Includes piece data and piece v2 hashes. Used for efficient set
+ // logic with peer pieces.
_pendingPieces roaring.Bitmap
// A cache of completed piece indices.
_completedPieces roaring.Bitmap
t.updatePiecePriorities(h.begin, h.end, "Torrent.readerPosChanged")
} else {
// Ranges overlap.
- end := l.end
- if h.end > end {
- end = h.end
- }
- t.updatePiecePriorities(l.begin, end, "Torrent.readerPosChanged")
+ t.updatePiecePriorities(l.begin, max(l.end, h.end), "Torrent.readerPosChanged")
}
}
return len(t.conns) + len(t.webSeeds)
}
+// Specifically, whether we can expect data to vanish while trying to read.
func (t *Torrent) hasStorageCap() bool {
f := t.storage.Capacity
if f == nil {