We need them external to the torrent package so we can test the API for adding dialers and listeners.
"os"
"path/filepath"
"reflect"
- "sync"
"testing"
"time"
"github.com/bradfitz/iter"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "golang.org/x/time/rate"
"github.com/anacrolix/dht/v2"
_ "github.com/anacrolix/envpprof"
"github.com/anacrolix/torrent/storage"
)
-func TestingConfig() *ClientConfig {
- cfg := NewDefaultClientConfig()
- cfg.ListenHost = LoopbackListenHost
- cfg.NoDHT = true
- cfg.DataDir = tempDir()
- cfg.DisableTrackers = true
- cfg.NoDefaultPortForwarding = true
- cfg.DisableAcceptRateLimiting = true
- cfg.ListenPort = 0
- //cfg.Debug = true
- //cfg.Logger = cfg.Logger.WithText(func(m log.Msg) string {
- // t := m.Text()
- // m.Values(func(i interface{}) bool {
- // t += fmt.Sprintf("\n%[1]T: %[1]v", i)
- // return true
- // })
- // return t
- //})
- return cfg
-}
-
func TestClientDefault(t *testing.T) {
cl, err := NewClient(TestingConfig())
require.NoError(t, err)
cl.initLogger()
tor := cl.newTorrent(
mi.HashInfoBytes(),
- storage.NewFileWithCompletion(tempDir(), storage.NewMapPieceCompletion()),
+ storage.NewFileWithCompletion(TestingTempDir.NewSub(), storage.NewMapPieceCompletion()),
)
tor.setChunkSize(2)
tor.cl.lock()
}
}
-type fileCacheClientStorageFactoryParams struct {
- Capacity int64
- SetCapacity bool
- Wrapper func(*filecache.Cache) storage.ClientImpl
-}
-
-func newFileCacheClientStorageFactory(ps fileCacheClientStorageFactoryParams) storageFactory {
- return func(dataDir string) storage.ClientImpl {
- fc, err := filecache.NewCache(dataDir)
- if err != nil {
- panic(err)
- }
- if ps.SetCapacity {
- fc.SetCapacity(ps.Capacity)
- }
- return ps.Wrapper(fc)
- }
-}
-
-type storageFactory func(string) storage.ClientImpl
-
-func TestClientTransferDefault(t *testing.T) {
- testClientTransfer(t, testClientTransferParams{
- ExportClientStatus: true,
- LeecherStorage: newFileCacheClientStorageFactory(fileCacheClientStorageFactoryParams{
- Wrapper: fileCachePieceResourceStorage,
- }),
- })
-}
-
-func TestClientTransferRateLimitedUpload(t *testing.T) {
- started := time.Now()
- testClientTransfer(t, testClientTransferParams{
- // We are uploading 13 bytes (the length of the greeting torrent). The
- // chunks are 2 bytes in length. Then the smallest burst we can run
- // with is 2. Time taken is (13-burst)/rate.
- SeederUploadRateLimiter: rate.NewLimiter(11, 2),
- ExportClientStatus: true,
- })
- require.True(t, time.Since(started) > time.Second)
-}
-
-func TestClientTransferRateLimitedDownload(t *testing.T) {
- testClientTransfer(t, testClientTransferParams{
- LeecherDownloadRateLimiter: rate.NewLimiter(512, 512),
- })
-}
-
func fileCachePieceResourceStorage(fc *filecache.Cache) storage.ClientImpl {
return storage.NewResourcePieces(fc.AsResourceProvider())
}
-func testClientTransferSmallCache(t *testing.T, setReadahead bool, readahead int64) {
- testClientTransfer(t, testClientTransferParams{
- LeecherStorage: newFileCacheClientStorageFactory(fileCacheClientStorageFactoryParams{
- SetCapacity: true,
- // Going below the piece length means it can't complete a piece so
- // that it can be hashed.
- Capacity: 5,
- Wrapper: fileCachePieceResourceStorage,
- }),
- SetReadahead: setReadahead,
- // Can't readahead too far or the cache will thrash and drop data we
- // thought we had.
- Readahead: readahead,
- ExportClientStatus: true,
- })
-}
-
-func TestClientTransferSmallCachePieceSizedReadahead(t *testing.T) {
- testClientTransferSmallCache(t, true, 5)
-}
-
-func TestClientTransferSmallCacheLargeReadahead(t *testing.T) {
- testClientTransferSmallCache(t, true, 15)
-}
-
-func TestClientTransferSmallCacheDefaultReadahead(t *testing.T) {
- testClientTransferSmallCache(t, false, -1)
-}
-
-func TestClientTransferVarious(t *testing.T) {
- // Leecher storage
- for _, ls := range []struct {
- name string
- f storageFactory
- }{
- {"Filecache", newFileCacheClientStorageFactory(fileCacheClientStorageFactoryParams{
- Wrapper: fileCachePieceResourceStorage,
- })},
- {"Boltdb", storage.NewBoltDB},
- } {
- t.Run(fmt.Sprintf("LeecherStorage=%s", ls.name), func(t *testing.T) {
- // Seeder storage
- for _, ss := range []struct {
- name string
- f func(string) storage.ClientImpl
- }{
- {"File", storage.NewFile},
- {"Mmap", storage.NewMMap},
- } {
- t.Run(fmt.Sprintf("%sSeederStorage", ss.name), func(t *testing.T) {
- for _, responsive := range []bool{false, true} {
- t.Run(fmt.Sprintf("Responsive=%v", responsive), func(t *testing.T) {
- t.Run("NoReadahead", func(t *testing.T) {
- testClientTransfer(t, testClientTransferParams{
- Responsive: responsive,
- SeederStorage: ss.f,
- LeecherStorage: ls.f,
- })
- })
- for _, readahead := range []int64{-1, 0, 1, 2, 3, 4, 5, 6, 9, 10, 11, 12, 13, 14, 15, 20} {
- t.Run(fmt.Sprintf("readahead=%v", readahead), func(t *testing.T) {
- testClientTransfer(t, testClientTransferParams{
- SeederStorage: ss.f,
- Responsive: responsive,
- SetReadahead: true,
- Readahead: readahead,
- LeecherStorage: ls.f,
- })
- })
- }
- })
- }
- })
- }
- })
- }
-}
-
-type testClientTransferParams struct {
- Responsive bool
- Readahead int64
- SetReadahead bool
- ExportClientStatus bool
- LeecherStorage func(string) storage.ClientImpl
- SeederStorage func(string) storage.ClientImpl
- SeederUploadRateLimiter *rate.Limiter
- LeecherDownloadRateLimiter *rate.Limiter
-}
-
-// Creates a seeder and a leecher, and ensures the data transfers when a read
-// is attempted on the leecher.
-func testClientTransfer(t *testing.T, ps testClientTransferParams) {
- greetingTempDir, mi := testutil.GreetingTestTorrent()
- defer os.RemoveAll(greetingTempDir)
- // Create seeder and a Torrent.
- cfg := TestingConfig()
- cfg.Seed = true
- if ps.SeederUploadRateLimiter != nil {
- cfg.UploadRateLimiter = ps.SeederUploadRateLimiter
- }
- // cfg.ListenAddr = "localhost:4000"
- if ps.SeederStorage != nil {
- cfg.DefaultStorage = ps.SeederStorage(greetingTempDir)
- defer cfg.DefaultStorage.Close()
- } else {
- cfg.DataDir = greetingTempDir
- }
- seeder, err := NewClient(cfg)
- require.NoError(t, err)
- if ps.ExportClientStatus {
- defer testutil.ExportStatusWriter(seeder, "s")()
- }
- seederTorrent, _, _ := seeder.AddTorrentSpec(TorrentSpecFromMetaInfo(mi))
- // Run a Stats right after Closing the Client. This will trigger the Stats
- // panic in #214 caused by RemoteAddr on Closed uTP sockets.
- defer seederTorrent.Stats()
- defer seeder.Close()
- seederTorrent.VerifyData()
- // Create leecher and a Torrent.
- leecherDataDir, err := ioutil.TempDir("", "")
- require.NoError(t, err)
- defer os.RemoveAll(leecherDataDir)
- cfg = TestingConfig()
- if ps.LeecherStorage == nil {
- cfg.DataDir = leecherDataDir
- } else {
- cfg.DefaultStorage = ps.LeecherStorage(leecherDataDir)
- }
- if ps.LeecherDownloadRateLimiter != nil {
- cfg.DownloadRateLimiter = ps.LeecherDownloadRateLimiter
- }
- cfg.Seed = false
- //cfg.Debug = true
- leecher, err := NewClient(cfg)
- require.NoError(t, err)
- defer leecher.Close()
- if ps.ExportClientStatus {
- defer testutil.ExportStatusWriter(leecher, "l")()
- }
- leecherTorrent, new, err := leecher.AddTorrentSpec(func() (ret *TorrentSpec) {
- ret = TorrentSpecFromMetaInfo(mi)
- ret.ChunkSize = 2
- return
- }())
- require.NoError(t, err)
- assert.True(t, new)
-
- //// This was used when observing coalescing of piece state changes.
- //logPieceStateChanges(leecherTorrent)
-
- // Now do some things with leecher and seeder.
- leecherTorrent.AddClientPeer(seeder)
- // The Torrent should not be interested in obtaining peers, so the one we
- // just added should be the only one.
- assert.False(t, leecherTorrent.Seeding())
- assert.EqualValues(t, 1, leecherTorrent.Stats().PendingPeers)
- r := leecherTorrent.NewReader()
- defer r.Close()
- if ps.Responsive {
- r.SetResponsive()
- }
- if ps.SetReadahead {
- r.SetReadahead(ps.Readahead)
- }
- assertReadAllGreeting(t, r)
-
- seederStats := seederTorrent.Stats()
- assert.True(t, 13 <= seederStats.BytesWrittenData.Int64())
- assert.True(t, 8 <= seederStats.ChunksWritten.Int64())
-
- leecherStats := leecherTorrent.Stats()
- assert.True(t, 13 <= leecherStats.BytesReadData.Int64())
- assert.True(t, 8 <= leecherStats.ChunksRead.Int64())
-
- // Try reading through again for the cases where the torrent data size
- // exceeds the size of the cache.
- assertReadAllGreeting(t, r)
-}
-
-func assertReadAllGreeting(t *testing.T, r io.ReadSeeker) {
- pos, err := r.Seek(0, io.SeekStart)
- assert.NoError(t, err)
- assert.EqualValues(t, 0, pos)
- _greeting, err := ioutil.ReadAll(r)
- assert.NoError(t, err)
- assert.EqualValues(t, testutil.GreetingFileContents, _greeting)
-}
-
-// Check that after completing leeching, a leecher transitions to a seeding
-// correctly. Connected in a chain like so: Seeder <-> Leecher <-> LeecherLeecher.
-func TestSeedAfterDownloading(t *testing.T) {
- greetingTempDir, mi := testutil.GreetingTestTorrent()
- defer os.RemoveAll(greetingTempDir)
-
- cfg := TestingConfig()
- cfg.Seed = true
- cfg.DataDir = greetingTempDir
- seeder, err := NewClient(cfg)
- require.NoError(t, err)
- defer seeder.Close()
- defer testutil.ExportStatusWriter(seeder, "s")()
- seederTorrent, ok, err := seeder.AddTorrentSpec(TorrentSpecFromMetaInfo(mi))
- require.NoError(t, err)
- assert.True(t, ok)
- seederTorrent.VerifyData()
-
- cfg = TestingConfig()
- cfg.Seed = true
- cfg.DataDir, err = ioutil.TempDir("", "")
- require.NoError(t, err)
- defer os.RemoveAll(cfg.DataDir)
- leecher, err := NewClient(cfg)
- require.NoError(t, err)
- defer leecher.Close()
- defer testutil.ExportStatusWriter(leecher, "l")()
-
- cfg = TestingConfig()
- cfg.Seed = false
- cfg.DataDir, err = ioutil.TempDir("", "")
- require.NoError(t, err)
- defer os.RemoveAll(cfg.DataDir)
- leecherLeecher, _ := NewClient(cfg)
- require.NoError(t, err)
- defer leecherLeecher.Close()
- defer testutil.ExportStatusWriter(leecherLeecher, "ll")()
- leecherGreeting, ok, err := leecher.AddTorrentSpec(func() (ret *TorrentSpec) {
- ret = TorrentSpecFromMetaInfo(mi)
- ret.ChunkSize = 2
- return
- }())
- require.NoError(t, err)
- assert.True(t, ok)
- llg, ok, err := leecherLeecher.AddTorrentSpec(func() (ret *TorrentSpec) {
- ret = TorrentSpecFromMetaInfo(mi)
- ret.ChunkSize = 3
- return
- }())
- require.NoError(t, err)
- assert.True(t, ok)
- // Simultaneously DownloadAll in Leecher, and read the contents
- // consecutively in LeecherLeecher. This non-deterministically triggered a
- // case where the leecher wouldn't unchoke the LeecherLeecher.
- var wg sync.WaitGroup
- wg.Add(1)
- go func() {
- defer wg.Done()
- r := llg.NewReader()
- defer r.Close()
- b, err := ioutil.ReadAll(r)
- require.NoError(t, err)
- assert.EqualValues(t, testutil.GreetingFileContents, b)
- }()
- done := make(chan struct{})
- defer close(done)
- go leecherGreeting.AddClientPeer(seeder)
- go leecherGreeting.AddClientPeer(leecherLeecher)
- wg.Add(1)
- go func() {
- defer wg.Done()
- leecherGreeting.DownloadAll()
- leecher.WaitAll()
- }()
- wg.Wait()
-}
-
func TestMergingTrackersByAddingSpecs(t *testing.T) {
cl, err := NewClient(TestingConfig())
require.NoError(t, err)
github.com/anacrolix/missinggo/perf v1.0.0
github.com/anacrolix/missinggo/v2 v2.3.2-0.20200110051601-fc3212fb3984
github.com/anacrolix/multiless v0.0.0-20191223025854-070b7994e841
+ github.com/anacrolix/stm v0.2.0
github.com/anacrolix/sync v0.2.0
github.com/anacrolix/tagflag v1.0.1
github.com/anacrolix/upnp v0.1.1
--- /dev/null
+package tmproot
+
+import (
+ "io/ioutil"
+ "os"
+ "sync"
+)
+
+// Dir manages a lazily created temporary directory root from which
+// per-use subdirectories can be allocated. The zero value is ready to
+// use: the underlying directory is created on first use (see
+// lazyDefaultInit) unless Init is called first.
+type Dir struct {
+	mu sync.Mutex
+	path string
+	inited bool
+}
+
+// init creates the temporary root directory with the given prefix if it
+// has not been created yet, and reports whether initialization happened
+// on this call. Panics if the directory cannot be created. Callers must
+// hold me.mu.
+func (me *Dir) init(prefix string) bool {
+	if me.inited {
+		return false
+	}
+	var err error
+	me.path, err = ioutil.TempDir("", prefix)
+	if err != nil {
+		panic(err)
+	}
+	me.inited = true
+	return true
+}
+
+// Init explicitly initializes the root directory with the given prefix.
+// It panics if the Dir was already initialized, whether explicitly or
+// lazily through a prior NewSub/RemoveAll call.
+func (me *Dir) Init(prefix string) {
+	me.mu.Lock()
+	defer me.mu.Unlock()
+	if me.inited {
+		panic("already inited")
+	}
+	me.init(prefix)
+}
+
+// lazyDefaultInit initializes the root with an empty prefix if no
+// explicit Init has happened yet. Safe for concurrent use; a no-op once
+// the Dir is initialized.
+func (me *Dir) lazyDefaultInit() {
+	me.mu.Lock()
+	defer me.mu.Unlock()
+	me.init("")
+}
+
+// NewSub creates and returns a fresh subdirectory beneath the root,
+// initializing the root first if necessary. Panics if the subdirectory
+// cannot be created.
+func (me *Dir) NewSub() string {
+	me.lazyDefaultInit()
+	ret, err := ioutil.TempDir(me.path, "")
+	if err != nil {
+		panic(err)
+	}
+	return ret
+}
+
+// RemoveAll deletes the root directory and everything beneath it.
+// NOTE(review): it lazily initializes first, so calling RemoveAll on an
+// unused Dir creates an empty root and then removes it — presumably to
+// keep the path field valid; confirm this is intended.
+func (me *Dir) RemoveAll() error {
+	me.lazyDefaultInit()
+	return os.RemoveAll(me.path)
+}
package torrent
import (
- "io/ioutil"
"log"
"os"
"testing"
)
-// A top-level temp dir that lasts for the duration of the package tests, and
-// is removed at completion.
-var pkgTempDir string
-
func init() {
log.SetFlags(log.LstdFlags | log.Lshortfile)
- var err error
- pkgTempDir, err = ioutil.TempDir("", "torrent.test")
- if err != nil {
- panic(err)
- }
-}
-
-func tempDir() string {
- ret, err := ioutil.TempDir(pkgTempDir, "")
- if err != nil {
- panic(err)
- }
- return ret
}
func TestMain(m *testing.M) {
+	// Create the shared temp root before any test runs; every
+	// TestingConfig data dir lives beneath it.
+	TestingTempDir.Init("torrent.test")
	code := m.Run()
-	os.RemoveAll(pkgTempDir)
+	// Clean up everything the tests wrote in one sweep.
+	TestingTempDir.RemoveAll()
	// select {}
	os.Exit(code)
}
--- /dev/null
+package test
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/anacrolix/missinggo/v2/filecache"
+ "github.com/anacrolix/torrent"
+ "github.com/anacrolix/torrent/internal/testutil"
+ "github.com/anacrolix/torrent/storage"
+ "golang.org/x/time/rate"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// testClientTransferParams configures the seeder/leecher pair built by
+// testClientTransfer. Zero values select the defaults (file-backed
+// storage via DataDir, no rate limiting, readahead untouched).
+type testClientTransferParams struct {
+	Responsive bool
+	Readahead int64
+	SetReadahead bool
+	ExportClientStatus bool
+	LeecherStorage func(string) storage.ClientImpl
+	SeederStorage func(string) storage.ClientImpl
+	SeederUploadRateLimiter *rate.Limiter
+	LeecherDownloadRateLimiter *rate.Limiter
+}
+
+// assertReadAllGreeting seeks r back to the start and asserts that its
+// entire contents equal the greeting torrent's file contents.
+func assertReadAllGreeting(t *testing.T, r io.ReadSeeker) {
+	pos, err := r.Seek(0, io.SeekStart)
+	assert.NoError(t, err)
+	assert.EqualValues(t, 0, pos)
+	_greeting, err := ioutil.ReadAll(r)
+	assert.NoError(t, err)
+	assert.EqualValues(t, testutil.GreetingFileContents, _greeting)
+}
+
+// Creates a seeder and a leecher, and ensures the data transfers when a read
+// is attempted on the leecher.
+func testClientTransfer(t *testing.T, ps testClientTransferParams) {
+	greetingTempDir, mi := testutil.GreetingTestTorrent()
+	defer os.RemoveAll(greetingTempDir)
+	// Create seeder and a Torrent.
+	cfg := torrent.TestingConfig()
+	cfg.Seed = true
+	if ps.SeederUploadRateLimiter != nil {
+		cfg.UploadRateLimiter = ps.SeederUploadRateLimiter
+	}
+	// cfg.ListenAddr = "localhost:4000"
+	if ps.SeederStorage != nil {
+		cfg.DefaultStorage = ps.SeederStorage(greetingTempDir)
+		defer cfg.DefaultStorage.Close()
+	} else {
+		cfg.DataDir = greetingTempDir
+	}
+	seeder, err := torrent.NewClient(cfg)
+	require.NoError(t, err)
+	if ps.ExportClientStatus {
+		defer testutil.ExportStatusWriter(seeder, "s")()
+	}
+	seederTorrent, _, _ := seeder.AddTorrentSpec(torrent.TorrentSpecFromMetaInfo(mi))
+	// Run a Stats right after Closing the Client. This will trigger the Stats
+	// panic in #214 caused by RemoteAddr on Closed uTP sockets.
+	defer seederTorrent.Stats()
+	defer seeder.Close()
+	seederTorrent.VerifyData()
+	// Create leecher and a Torrent.
+	leecherDataDir, err := ioutil.TempDir("", "")
+	require.NoError(t, err)
+	defer os.RemoveAll(leecherDataDir)
+	cfg = torrent.TestingConfig()
+	if ps.LeecherStorage == nil {
+		cfg.DataDir = leecherDataDir
+	} else {
+		cfg.DefaultStorage = ps.LeecherStorage(leecherDataDir)
+	}
+	if ps.LeecherDownloadRateLimiter != nil {
+		cfg.DownloadRateLimiter = ps.LeecherDownloadRateLimiter
+	}
+	cfg.Seed = false
+	//cfg.Debug = true
+	leecher, err := torrent.NewClient(cfg)
+	require.NoError(t, err)
+	defer leecher.Close()
+	if ps.ExportClientStatus {
+		defer testutil.ExportStatusWriter(leecher, "l")()
+	}
+	// isNew renamed from "new" so it doesn't shadow the predeclared
+	// identifier.
+	leecherTorrent, isNew, err := leecher.AddTorrentSpec(func() (ret *torrent.TorrentSpec) {
+		ret = torrent.TorrentSpecFromMetaInfo(mi)
+		ret.ChunkSize = 2
+		return
+	}())
+	require.NoError(t, err)
+	assert.True(t, isNew)
+
+	//// This was used when observing coalescing of piece state changes.
+	//logPieceStateChanges(leecherTorrent)
+
+	// Now do some things with leecher and seeder.
+	leecherTorrent.AddClientPeer(seeder)
+	// The Torrent should not be interested in obtaining peers, so the one we
+	// just added should be the only one.
+	assert.False(t, leecherTorrent.Seeding())
+	assert.EqualValues(t, 1, leecherTorrent.Stats().PendingPeers)
+	r := leecherTorrent.NewReader()
+	defer r.Close()
+	if ps.Responsive {
+		r.SetResponsive()
+	}
+	if ps.SetReadahead {
+		r.SetReadahead(ps.Readahead)
+	}
+	assertReadAllGreeting(t, r)
+
+	seederStats := seederTorrent.Stats()
+	assert.True(t, 13 <= seederStats.BytesWrittenData.Int64())
+	assert.True(t, 8 <= seederStats.ChunksWritten.Int64())
+
+	leecherStats := leecherTorrent.Stats()
+	assert.True(t, 13 <= leecherStats.BytesReadData.Int64())
+	assert.True(t, 8 <= leecherStats.ChunksRead.Int64())
+
+	// Try reading through again for the cases where the torrent data size
+	// exceeds the size of the cache.
+	assertReadAllGreeting(t, r)
+}
+
+// fileCacheClientStorageFactoryParams configures
+// newFileCacheClientStorageFactory. Capacity is only applied when
+// SetCapacity is true; Wrapper converts the file cache into the storage
+// implementation handed to the client.
+type fileCacheClientStorageFactoryParams struct {
+	Capacity int64
+	SetCapacity bool
+	Wrapper func(*filecache.Cache) storage.ClientImpl
+}
+
+// newFileCacheClientStorageFactory returns a storageFactory that builds
+// a filecache-backed storage.ClientImpl rooted at the given data dir.
+// Panics if the cache cannot be created (acceptable in tests).
+func newFileCacheClientStorageFactory(ps fileCacheClientStorageFactoryParams) storageFactory {
+	return func(dataDir string) storage.ClientImpl {
+		fc, err := filecache.NewCache(dataDir)
+		if err != nil {
+			panic(err)
+		}
+		if ps.SetCapacity {
+			fc.SetCapacity(ps.Capacity)
+		}
+		return ps.Wrapper(fc)
+	}
+}
+
+// storageFactory builds a storage implementation rooted at a data
+// directory path.
+type storageFactory func(string) storage.ClientImpl
+
+// TestClientTransferDefault exercises a basic seeder->leecher transfer
+// with a filecache-backed leecher and no rate limits.
+func TestClientTransferDefault(t *testing.T) {
+	testClientTransfer(t, testClientTransferParams{
+		ExportClientStatus: true,
+		LeecherStorage: newFileCacheClientStorageFactory(fileCacheClientStorageFactoryParams{
+			Wrapper: fileCachePieceResourceStorage,
+		}),
+	})
+}
+
+// TestClientTransferRateLimitedUpload verifies that a seeder upload rate
+// limit actually slows the transfer to at least the computed duration.
+func TestClientTransferRateLimitedUpload(t *testing.T) {
+	started := time.Now()
+	testClientTransfer(t, testClientTransferParams{
+		// We are uploading 13 bytes (the length of the greeting torrent). The
+		// chunks are 2 bytes in length. Then the smallest burst we can run
+		// with is 2. Time taken is (13-burst)/rate.
+		SeederUploadRateLimiter: rate.NewLimiter(11, 2),
+		ExportClientStatus: true,
+	})
+	require.True(t, time.Since(started) > time.Second)
+}
+
+// TestClientTransferRateLimitedDownload runs a transfer with a download
+// rate limit on the leecher; the limit (512 B/s, burst 512) comfortably
+// exceeds the 13-byte payload, so this checks the limiter path, not
+// timing.
+func TestClientTransferRateLimitedDownload(t *testing.T) {
+	testClientTransfer(t, testClientTransferParams{
+		LeecherDownloadRateLimiter: rate.NewLimiter(512, 512),
+	})
+}
+
+// fileCachePieceResourceStorage adapts a filecache.Cache into a
+// resource-pieces storage implementation.
+func fileCachePieceResourceStorage(fc *filecache.Cache) storage.ClientImpl {
+	return storage.NewResourcePieces(fc.AsResourceProvider())
+}
+
+// testClientTransferSmallCache runs a transfer where the leecher's cache
+// capacity (5 bytes) is smaller than a piece, forcing cache churn.
+func testClientTransferSmallCache(t *testing.T, setReadahead bool, readahead int64) {
+	testClientTransfer(t, testClientTransferParams{
+		LeecherStorage: newFileCacheClientStorageFactory(fileCacheClientStorageFactoryParams{
+			SetCapacity: true,
+			// Going below the piece length means it can't complete a piece so
+			// that it can be hashed.
+			Capacity: 5,
+			Wrapper: fileCachePieceResourceStorage,
+		}),
+		SetReadahead: setReadahead,
+		// Can't readahead too far or the cache will thrash and drop data we
+		// thought we had.
+		Readahead: readahead,
+		ExportClientStatus: true,
+	})
+}
+
+// Readahead equal to the cache capacity.
+func TestClientTransferSmallCachePieceSizedReadahead(t *testing.T) {
+	testClientTransferSmallCache(t, true, 5)
+}
+
+// Readahead larger than the cache capacity.
+func TestClientTransferSmallCacheLargeReadahead(t *testing.T) {
+	testClientTransferSmallCache(t, true, 15)
+}
+
+// Default readahead (SetReadahead=false; the -1 is ignored).
+func TestClientTransferSmallCacheDefaultReadahead(t *testing.T) {
+	testClientTransferSmallCache(t, false, -1)
+}
+
+// TestClientTransferVarious sweeps the transfer test across leecher
+// storage (filecache, boltdb), seeder storage (file, mmap), responsive
+// reads, and a range of readahead values.
+func TestClientTransferVarious(t *testing.T) {
+	// Leecher storage
+	for _, ls := range []struct {
+		name string
+		f storageFactory
+	}{
+		{"Filecache", newFileCacheClientStorageFactory(fileCacheClientStorageFactoryParams{
+			Wrapper: fileCachePieceResourceStorage,
+		})},
+		{"Boltdb", storage.NewBoltDB},
+	} {
+		t.Run(fmt.Sprintf("LeecherStorage=%s", ls.name), func(t *testing.T) {
+			// Seeder storage
+			for _, ss := range []struct {
+				name string
+				f func(string) storage.ClientImpl
+			}{
+				{"File", storage.NewFile},
+				{"Mmap", storage.NewMMap},
+			} {
+				t.Run(fmt.Sprintf("%sSeederStorage", ss.name), func(t *testing.T) {
+					for _, responsive := range []bool{false, true} {
+						t.Run(fmt.Sprintf("Responsive=%v", responsive), func(t *testing.T) {
+							t.Run("NoReadahead", func(t *testing.T) {
+								testClientTransfer(t, testClientTransferParams{
+									Responsive: responsive,
+									SeederStorage: ss.f,
+									LeecherStorage: ls.f,
+								})
+							})
+							for _, readahead := range []int64{-1, 0, 1, 2, 3, 4, 5, 6, 9, 10, 11, 12, 13, 14, 15, 20} {
+								t.Run(fmt.Sprintf("readahead=%v", readahead), func(t *testing.T) {
+									testClientTransfer(t, testClientTransferParams{
+										SeederStorage: ss.f,
+										Responsive: responsive,
+										SetReadahead: true,
+										Readahead: readahead,
+										LeecherStorage: ls.f,
+									})
+								})
+							}
+						})
+					}
+				})
+			}
+		})
+	}
+}
+
+// Check that after completing leeching, a leecher transitions to a seeding
+// correctly. Connected in a chain like so: Seeder <-> Leecher <-> LeecherLeecher.
+func TestSeedAfterDownloading(t *testing.T) {
+	greetingTempDir, mi := testutil.GreetingTestTorrent()
+	defer os.RemoveAll(greetingTempDir)
+
+	// Seeder with the greeting data already on disk.
+	cfg := torrent.TestingConfig()
+	cfg.Seed = true
+	cfg.DataDir = greetingTempDir
+	seeder, err := torrent.NewClient(cfg)
+	require.NoError(t, err)
+	defer seeder.Close()
+	defer testutil.ExportStatusWriter(seeder, "s")()
+	seederTorrent, ok, err := seeder.AddTorrentSpec(torrent.TorrentSpecFromMetaInfo(mi))
+	require.NoError(t, err)
+	assert.True(t, ok)
+	seederTorrent.VerifyData()
+
+	// Middle of the chain: leeches from the seeder, reseeds to
+	// leecherLeecher.
+	cfg = torrent.TestingConfig()
+	cfg.Seed = true
+	cfg.DataDir, err = ioutil.TempDir("", "")
+	require.NoError(t, err)
+	defer os.RemoveAll(cfg.DataDir)
+	leecher, err := torrent.NewClient(cfg)
+	require.NoError(t, err)
+	defer leecher.Close()
+	defer testutil.ExportStatusWriter(leecher, "l")()
+
+	// End of the chain: downloads only.
+	cfg = torrent.TestingConfig()
+	cfg.Seed = false
+	cfg.DataDir, err = ioutil.TempDir("", "")
+	require.NoError(t, err)
+	defer os.RemoveAll(cfg.DataDir)
+	// Fix: the error from NewClient was previously discarded with _, so
+	// the following require checked a stale err from TempDir.
+	leecherLeecher, err := torrent.NewClient(cfg)
+	require.NoError(t, err)
+	defer leecherLeecher.Close()
+	defer testutil.ExportStatusWriter(leecherLeecher, "ll")()
+	leecherGreeting, ok, err := leecher.AddTorrentSpec(func() (ret *torrent.TorrentSpec) {
+		ret = torrent.TorrentSpecFromMetaInfo(mi)
+		ret.ChunkSize = 2
+		return
+	}())
+	require.NoError(t, err)
+	assert.True(t, ok)
+	llg, ok, err := leecherLeecher.AddTorrentSpec(func() (ret *torrent.TorrentSpec) {
+		ret = torrent.TorrentSpecFromMetaInfo(mi)
+		ret.ChunkSize = 3
+		return
+	}())
+	require.NoError(t, err)
+	assert.True(t, ok)
+	// Simultaneously DownloadAll in Leecher, and read the contents
+	// consecutively in LeecherLeecher. This non-deterministically triggered a
+	// case where the leecher wouldn't unchoke the LeecherLeecher.
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		r := llg.NewReader()
+		defer r.Close()
+		b, err := ioutil.ReadAll(r)
+		require.NoError(t, err)
+		assert.EqualValues(t, testutil.GreetingFileContents, b)
+	}()
+	// (Removed a "done" channel that was created and closed but never
+	// received from.)
+	go leecherGreeting.AddClientPeer(seeder)
+	go leecherGreeting.AddClientPeer(leecherLeecher)
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		leecherGreeting.DownloadAll()
+		leecher.WaitAll()
+	}()
+	wg.Wait()
+}
--- /dev/null
+package torrent
+
+import (
+ "github.com/anacrolix/torrent/internal/tmproot"
+)
+
+// TestingTempDir is the shared temporary-directory root for tests. The
+// torrent package's TestMain initializes it with the "torrent.test"
+// prefix and removes it when the tests finish; other users rely on its
+// lazy default initialization.
+var TestingTempDir tmproot.Dir
+
+// TestingConfig returns a ClientConfig suitable for tests: loopback
+// listening on an ephemeral port, DHT/trackers/port-forwarding and
+// accept rate limiting disabled, and a fresh temp data directory per
+// call.
+func TestingConfig() *ClientConfig {
+	cfg := NewDefaultClientConfig()
+	cfg.ListenHost = LoopbackListenHost
+	cfg.NoDHT = true
+	cfg.DataDir = TestingTempDir.NewSub()
+	cfg.DisableTrackers = true
+	cfg.NoDefaultPortForwarding = true
+	cfg.DisableAcceptRateLimiting = true
+	cfg.ListenPort = 0
+	//cfg.Debug = true
+	//cfg.Logger = cfg.Logger.WithText(func(m log.Msg) string {
+	//	t := m.Text()
+	//	m.Values(func(i interface{}) bool {
+	//		t += fmt.Sprintf("\n%[1]T: %[1]v", i)
+	//		return true
+	//	})
+	//	return t
+	//})
+	return cfg
+}