// Ensure that it's an error for a peer to send an invalid have message.
func TestPeerInvalidHave(t *testing.T) {
- cl, err := NewClient(TestingConfig())
+ cfg := TestingConfig()
+ cfg.DropMutuallyCompletePeers = false
+ cl, err := NewClient(cfg)
require.NoError(t, err)
defer cl.Close()
info := metainfo.Info{
cn := &PeerConn{peer: peer{
t: tt,
}}
+ cn.peerImpl = cn
assert.NoError(t, cn.peerSentHave(0))
assert.Error(t, cn.peerSentHave(1))
}
// Don't add connections that have the same peer ID as an existing
// connection for a given Torrent.
DropDuplicatePeerIds bool
+ // Drop peers that are complete if we are also complete and have no use for the peer. This is a
+ // bit of a special case, since a peer could also be useless if they're just not interested, or
+ // we don't intend to obtain all of a torrent's data.
+ DropMutuallyCompletePeers bool
ConnTracker *conntrack.Instance
DownloadRateLimiter: unlimited,
ConnTracker: conntrack.NewInstance(),
DisableAcceptRateLimiting: true,
+ DropMutuallyCompletePeers: true,
HeaderObfuscationPolicy: HeaderObfuscationPolicy{
Preferred: true,
RequirePreferred: false,
cn.updateRequests()
}
}
+ cn.t.maybeDropMutuallyCompletePeer(&cn.peer)
}
func (cn *PeerConn) raisePeerMinPieces(newMin pieceIndex) {
}
cn.raisePeerMinPieces(piece + 1)
cn._peerPieces.Set(bitmap.BitIndex(piece), true)
+ cn.t.maybeDropMutuallyCompletePeer(&cn.peer)
if cn.updatePiecePriority(piece) {
cn.updateRequests()
}
// Create seeder and a Torrent.
cfg := torrent.TestingConfig()
cfg.Seed = true
+ // Some test instances don't like this being on, even when there's no cache involved.
+ cfg.DropMutuallyCompletePeers = false
if ps.SeederUploadRateLimiter != nil {
cfg.UploadRateLimiter = ps.SeederUploadRateLimiter
}
require.NoError(t, err)
defer os.RemoveAll(leecherDataDir)
cfg = torrent.TestingConfig()
+ // See the seeder client config comment.
+ cfg.DropMutuallyCompletePeers = false
if ps.LeecherStorage == nil {
cfg.DataDir = leecherDataDir
} else {
assertReadAllGreeting(t, r)
assert.NotEmpty(t, seederTorrent.PeerConns())
leecherPeerConns := leecherTorrent.PeerConns()
- assert.NotEmpty(t, leecherPeerConns)
+ if cfg.DropMutuallyCompletePeers {
+ // I don't think we can assume it will be empty already, due to timing.
+ //assert.Empty(t, leecherPeerConns)
+ } else {
+ assert.NotEmpty(t, leecherPeerConns)
+ }
foundSeeder := false
for _, pc := range leecherPeerConns {
completed := pc.PeerPieces().Len()
return t.haveInfo() && t.pieceComplete(index)
}
+// maybeDropMutuallyCompletePeer disconnects p when both sides are complete and
+// the connection serves no further purpose. It is a no-op unless the client's
+// DropMutuallyCompletePeers config option is enabled.
+func (t *Torrent) maybeDropMutuallyCompletePeer(
+ // I'm not sure about taking peer here, not all peer implementations actually drop. Maybe that's okay?
+ p *peer,
+) {
+ if !t.cl.config.DropMutuallyCompletePeers {
+ return
+ }
+ // We can only be "mutually" complete if we have every piece ourselves.
+ if !t.haveAllPieces() {
+ return
+ }
+ // Require positive knowledge that the peer is complete: an unknown
+ // completion state (known == false) is not grounds for dropping.
+ if all, known := p.peerHasAllPieces(); !(known && all) {
+ return
+ }
+ // The peer may still have some other use to us; if so, keep it.
+ if p.useful() {
+ return
+ }
+ // NOTE(review): this logs unconditionally via the global logger — presumably
+ // acceptable for a rare event, but consider the client/torrent logger.
+ log.Printf("dropping %v, which is mutually complete", p)
+ p.drop()
+}
+
func (t *Torrent) haveChunk(r request) (ret bool) {
// defer func() {
// log.Println("have chunk", r, ret)
t.cancelRequestsForPiece(piece)
for conn := range t.conns {
conn.have(piece)
+ t.maybeDropMutuallyCompletePeer(&conn.peer)
}
}