import (
"testing"
- qt "github.com/go-quicktest/qt"
+ "github.com/go-quicktest/qt"
)
func LengthIterFromSlice(ls []Length) LengthIter {
{0, 1536},
{0, 667},
})
+ checkContiguous(t, newLocater,
+ []Length{0, 2, 0, 2, 0}, // 128737588
+ Extent{1, 2},
+ 1,
+ []Extent{
+ {1, 1},
+ {0, 0},
+ {0, 1},
+ })
+ checkContiguous(t, newLocater,
+ []Length{2, 0, 2, 0}, // 128737588
+ Extent{1, 3},
+ 0,
+ []Extent{
+ {1, 1},
+ {0, 0},
+ {0, 2},
+ })
}
func TestScan(t *testing.T) {
return NewIndex(li).Locate
})
}
+
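+ // TestIndexLocateIter exercises Index.LocateIter by adapting its iterator to the
+ // callback-based Locater signature expected by testLocater.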
+func TestIndexLocateIter(t *testing.T) {
+ testLocater(t, func(li LengthIter) Locater {
+ index := NewIndex(li)
+ return func(extent Extent, callback Callback) bool {
+ for i, e := range index.LocateIter(extent) {
+ if !callback(i, e) {
+ return false
+ }
+ }
+ return true
+ }
+ })
+}
import (
"errors"
+ "expvar"
"fmt"
"io"
"io/fs"
g "github.com/anacrolix/generics"
"github.com/anacrolix/missinggo/v2/panicif"
+ "golang.org/x/sys/unix"
"github.com/anacrolix/torrent/metainfo"
"github.com/anacrolix/torrent/segments"
return me.t.partFiles()
}
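+ // zeroReader is an io.Reader that returns an unlimited stream of zero bytes.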
+type zeroReader struct{}
+
+func (me zeroReader) Read(p []byte) (n int, err error) {
+ clear(p)
+ return len(p), nil
+}
+
func (me *filePieceImpl) WriteTo(w io.Writer) (n int64, err error) {
for fileIndex, extent := range me.iterFileSegments() {
- file := me.t.file(fileIndex)
- var f *os.File
- f, err = me.t.openFile(file)
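+ // Delegate each file segment to writeFileTo, which handles sparse-file holes.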
+ var n1 int64
+ n1, err = me.writeFileTo(w, fileIndex, extent)
+ n += n1
if err != nil {
return
}
- f.Seek(extent.Start, io.SeekStart)
+ panicif.NotEq(n1, extent.Length)
+ }
+ return
+}
+
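+ // Counters distinguishing bytes synthesized for sparse-file holes from bytes read normally.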
+var (
+ packageExpvarMap = expvar.NewMap("torrentStorage")
+)
+
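+ // writeFileTo copies the given extent of the file at fileIndex to w. It uses SEEK_DATA to
+ // detect whether the extent starts inside a hole of a sparse file, and fills any leading hole
+ // with zeroes instead of reading it from disk. Like io.CopyN, it returns io.EOF if the file's
+ // data ends before the extent is fully written.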
+func (me *filePieceImpl) writeFileTo(w io.Writer, fileIndex int, extent segments.Extent) (written int64, err error) {
+ if extent.Length == 0 {
+ return
+ }
+ file := me.t.file(fileIndex)
+ var f *os.File
+ f, err = me.t.openFile(file)
+ if err != nil {
+ return
+ }
+ defer f.Close()
+ panicif.GreaterThan(extent.End(), file.FileInfo.Length)
+ extentRemaining := extent.Length
+ var dataOffset int64
+ dataOffset, err = unix.Seek(int(f.Fd()), extent.Start, unix.SEEK_DATA)
+ if err == unix.ENXIO {
+ // No data at or after extent.Start. Treat it as a short copy, like io.CopyN does when the
+ // source ends early.
+ err = io.EOF
+ return
+ }
+ panicif.Err(err)
+ panicif.LessThan(dataOffset, extent.Start)
+ if dataOffset > extent.Start {
+ // The extent starts inside a hole. Write zeroes until data resumes or the extent ends,
+ // whichever comes first.
var n1 int64
- n1, err = io.CopyN(w, f, extent.Length)
- n += n1
- f.Close()
+ n := min(dataOffset-extent.Start, extent.Length)
+ n1, err = io.CopyN(w, zeroReader{}, n)
+ packageExpvarMap.Add("bytesReadSkippedHole", n1)
+ written += n1
if err != nil {
return
}
+ panicif.NotEq(n1, n)
+ extentRemaining -= n1
}
+ var n1 int64
+ n1, err = io.CopyN(w, f, extentRemaining)
+ packageExpvarMap.Add("bytesReadNotSkipped", n1)
+ written += n1
return
}
differingPeers, err = t.hashPieceWithSpecificHash(piece, h)
var sum [32]byte
// What about the final piece in a torrent? From BEP 52: "The layer is chosen so that one
- // hash covers piece length bytes.". Note that if a piece doesn't have a hash in piece
- // layers it's because it's not larger than the piece length.
+ // hash covers piece length bytes". Note that if a piece doesn't have a hash in piece layers
+ // it's because it's not larger than the piece length.
sumExactly(sum[:], func(b []byte) []byte {
return h.SumMinLength(b, int(t.info.PieceLength))
})