From ded6c19edbe1c5382dafc09c178c7d3ce111a84f Mon Sep 17 00:00:00 2001
From: Matt Joiner
Date: Tue, 5 Jan 2021 16:58:45 +1100
Subject: [PATCH] Add the DropMutuallyCompletePeers ClientConfig field

---
 client_test.go        |  5 ++++-
 config.go             |  5 +++++
 peerconn.go           |  2 ++
 test/transfer_test.go | 11 ++++++++++-
 torrent.go            | 21 +++++++++++++++++++++
 5 files changed, 42 insertions(+), 2 deletions(-)

diff --git a/client_test.go b/client_test.go
index dcb6d0be..44a7e5fb 100644
--- a/client_test.go
+++ b/client_test.go
@@ -527,7 +527,9 @@ func TestTorrentDownloadAllThenCancel(t *testing.T) {
 
 // Ensure that it's an error for a peer to send an invalid have message.
 func TestPeerInvalidHave(t *testing.T) {
-	cl, err := NewClient(TestingConfig())
+	cfg := TestingConfig()
+	cfg.DropMutuallyCompletePeers = false
+	cl, err := NewClient(cfg)
 	require.NoError(t, err)
 	defer cl.Close()
 	info := metainfo.Info{
@@ -548,6 +550,7 @@ func TestPeerInvalidHave(t *testing.T) {
 	cn := &PeerConn{peer: peer{
 		t: tt,
 	}}
+	cn.peerImpl = cn
 	assert.NoError(t, cn.peerSentHave(0))
 	assert.Error(t, cn.peerSentHave(1))
 }
diff --git a/config.go b/config.go
index f4c0f172..0cfde9d0 100644
--- a/config.go
+++ b/config.go
@@ -124,6 +124,10 @@ type ClientConfig struct {
 	// Don't add connections that have the same peer ID as an existing
 	// connection for a given Torrent.
 	DropDuplicatePeerIds bool
+	// Drop peers that are complete if we are also complete and have no use for the peer. This is a
+	// bit of a special case, since a peer could also be useless if they're just not interested, or
+	// we don't intend to obtain all of a torrent's data.
+	DropMutuallyCompletePeers bool
 
 	ConnTracker *conntrack.Instance
 
@@ -170,6 +174,7 @@ func NewDefaultClientConfig() *ClientConfig {
 		DownloadRateLimiter:       unlimited,
 		ConnTracker:               conntrack.NewInstance(),
 		DisableAcceptRateLimiting: true,
+		DropMutuallyCompletePeers: true,
 		HeaderObfuscationPolicy: HeaderObfuscationPolicy{
 			Preferred:        true,
 			RequirePreferred: false,
diff --git a/peerconn.go b/peerconn.go
index 28f7d005..43b66770 100644
--- a/peerconn.go
+++ b/peerconn.go
@@ -843,6 +843,7 @@ func (cn *PeerConn) peerPiecesChanged() {
 			cn.updateRequests()
 		}
 	}
+	cn.t.maybeDropMutuallyCompletePeer(&cn.peer)
 }
 
 func (cn *PeerConn) raisePeerMinPieces(newMin pieceIndex) {
@@ -860,6 +861,7 @@ func (cn *PeerConn) peerSentHave(piece pieceIndex) error {
 	}
 	cn.raisePeerMinPieces(piece + 1)
 	cn._peerPieces.Set(bitmap.BitIndex(piece), true)
+	cn.t.maybeDropMutuallyCompletePeer(&cn.peer)
 	if cn.updatePiecePriority(piece) {
 		cn.updateRequests()
 	}
diff --git a/test/transfer_test.go b/test/transfer_test.go
index 6c955b36..60678425 100644
--- a/test/transfer_test.go
+++ b/test/transfer_test.go
@@ -53,6 +53,8 @@ func testClientTransfer(t *testing.T, ps testClientTransferParams) {
 	// Create seeder and a Torrent.
 	cfg := torrent.TestingConfig()
 	cfg.Seed = true
+	// Some test instances don't like this being on, even when there's no cache involved.
+	cfg.DropMutuallyCompletePeers = false
 	if ps.SeederUploadRateLimiter != nil {
 		cfg.UploadRateLimiter = ps.SeederUploadRateLimiter
 	}
@@ -84,6 +86,8 @@ func testClientTransfer(t *testing.T, ps testClientTransferParams) {
 	require.NoError(t, err)
 	defer os.RemoveAll(leecherDataDir)
 	cfg = torrent.TestingConfig()
+	// See the seeder client config comment.
+	cfg.DropMutuallyCompletePeers = false
 	if ps.LeecherStorage == nil {
 		cfg.DataDir = leecherDataDir
 	} else {
@@ -142,7 +146,12 @@ func testClientTransfer(t *testing.T, ps testClientTransferParams) {
 	assertReadAllGreeting(t, r)
 	assert.NotEmpty(t, seederTorrent.PeerConns())
 	leecherPeerConns := leecherTorrent.PeerConns()
-	assert.NotEmpty(t, leecherPeerConns)
+	if cfg.DropMutuallyCompletePeers {
+		// I don't think we can assume it will be empty already, due to timing.
+		//assert.Empty(t, leecherPeerConns)
+	} else {
+		assert.NotEmpty(t, leecherPeerConns)
+	}
 	foundSeeder := false
 	for _, pc := range leecherPeerConns {
 		completed := pc.PeerPieces().Len()
diff --git a/torrent.go b/torrent.go
index 16d4a483..ffb452d4 100644
--- a/torrent.go
+++ b/torrent.go
@@ -826,6 +826,26 @@ func (t *Torrent) havePiece(index pieceIndex) bool {
 	return t.haveInfo() && t.pieceComplete(index)
 }
 
+func (t *Torrent) maybeDropMutuallyCompletePeer(
+	// I'm not sure about taking peer here, not all peer implementations actually drop. Maybe that's okay?
+	p *peer,
+) {
+	if !t.cl.config.DropMutuallyCompletePeers {
+		return
+	}
+	if !t.haveAllPieces() {
+		return
+	}
+	if all, known := p.peerHasAllPieces(); !(known && all) {
+		return
+	}
+	if p.useful() {
+		return
+	}
+	log.Printf("dropping %v, which is mutually complete", p)
+	p.drop()
+}
+
 func (t *Torrent) haveChunk(r request) (ret bool) {
 	// defer func() {
 	// 	log.Println("have chunk", r, ret)
@@ -1808,6 +1828,7 @@ func (t *Torrent) onPieceCompleted(piece pieceIndex) {
 	t.cancelRequestsForPiece(piece)
 	for conn := range t.conns {
 		conn.have(piece)
+		t.maybeDropMutuallyCompletePeer(&conn.peer)
 	}
 }
 
-- 
2.48.1
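
Usage note: the patch turns the new behaviour on by default (NewDefaultClientConfig
sets DropMutuallyCompletePeers to true), so downstream clients that want to keep
connections to other seeds open must opt out. A minimal sketch of that, assuming
only the package's existing NewDefaultClientConfig/NewClient entry points plus the
field added here:

	package main

	import (
		"log"

		"github.com/anacrolix/torrent"
	)

	func main() {
		cfg := torrent.NewDefaultClientConfig()
		// True by default after this patch; disable it to retain
		// connections to peers that are complete when we are also
		// complete and otherwise have no use for them.
		cfg.DropMutuallyCompletePeers = false

		cl, err := torrent.NewClient(cfg)
		if err != nil {
			log.Fatal(err)
		}
		defer cl.Close()
	}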