Sergey Matveev's repositories - btrtrc.git/commitdiff
Webseeds favour requesting partial files
author: Matt Joiner <anacrolix@gmail.com>
Wed, 23 Jul 2025 03:44:10 +0000 (13:44 +1000)
committer: Matt Joiner <anacrolix@gmail.com>
Wed, 23 Jul 2025 03:44:10 +0000 (13:44 +1000)
peerconn.go
requesting.go
torrent.go
webseed-requesting.go

index 1191093bd6f27c18a3cca40d2929978355061252..e0c9b3635f4bc9b7db34d552b8cf7f55784e0e7b 100644 (file)
@@ -14,6 +14,7 @@ import (
        "strings"
        "sync/atomic"
        "time"
+       "weak"
 
        "github.com/RoaringBitmap/roaring"
        "github.com/anacrolix/generics"
@@ -98,7 +99,6 @@ type PeerConn struct {
        // we may not even know the number of pieces in the torrent yet.
        peerSentHaveAll bool
 
-       // TODO: How are pending cancels handled for webseed peers?
        requestState requestStrategy.PeerRequestState
 
        peerRequestDataAllocLimiter alloclim.Limiter
@@ -1716,7 +1716,7 @@ func (cn *PeerConn) request(r RequestIndex) (more bool, err error) {
        }
        cn.validReceiveChunks[r]++
        cn.t.requestState[r] = requestState{
-               peer: cn,
+               peer: weak.Make(cn),
                when: time.Now(),
        }
        cn.updateExpectingChunks()
index c45a502b06d297958c919dfa782752d0033c2688..5ec3391d3d1c3f1bae1bd1af976598a51a1cdf59 100644 (file)
@@ -127,8 +127,8 @@ func (p *desiredPeerRequests) lessByValue(leftRequest, rightRequest RequestIndex
        }
        leftRequestState := t.requestState[leftRequest]
        rightRequestState := t.requestState[rightRequest]
-       leftPeer := leftRequestState.peer
-       rightPeer := rightRequestState.peer
+       leftPeer := leftRequestState.peer.Value()
+       rightPeer := rightRequestState.peer.Value()
        // Prefer chunks already requested from this peer.
        ml = ml.Bool(rightPeer == p.peer, leftPeer == p.peer)
        // Prefer unrequested chunks.
index 1fa8ca784c1cff33e0b207f424874e79418928b3..a69fa6bca6791b954b4ab1cb7a40aaee31cc9bff 100644 (file)
@@ -21,6 +21,7 @@ import (
        "time"
        "unique"
        "unsafe"
+       "weak"
 
        "github.com/RoaringBitmap/roaring"
        "github.com/anacrolix/chansync"
@@ -1863,6 +1864,9 @@ func (t *Torrent) deletePeerConn(c *PeerConn) (ret bool) {
        }
        torrent.Add("deleted connections", 1)
        c.deleteAllRequests("Torrent.deletePeerConn")
+       if len(t.conns) == 0 {
+               panicif.NotZero(len(t.requestState))
+       }
        t.assertPendingRequests()
        if t.numActivePeers() == 0 && len(t.connsWithAllPieces) != 0 {
                panic(t.connsWithAllPieces)
@@ -3167,8 +3171,10 @@ func (t *Torrent) cancelRequest(r RequestIndex) *PeerConn {
        return p
 }
 
-func (t *Torrent) requestingPeer(r RequestIndex) *PeerConn {
-       return t.requestState[r].peer
+func (t *Torrent) requestingPeer(r RequestIndex) (ret *PeerConn) {
+       ret = t.requestState[r].peer.Value()
+       panicif.Nil(ret)
+       return
 }
 
 func (t *Torrent) addConnWithAllPieces(p *Peer) {
@@ -3229,7 +3235,7 @@ func (t *Torrent) GetWebRtcPeerConnStats() map[string]webRtcStatsReports {
 }
 
 type requestState struct {
-       peer *PeerConn
+       peer weak.Pointer[PeerConn]
        when time.Time
 }
 
@@ -3558,3 +3564,31 @@ func (t *Torrent) considerStartingHashers() bool {
        }
        return true
 }
+
+func (t *Torrent) getFile(fileIndex int) *File {
+       return (*t.files)[fileIndex]
+}
+
+func (t *Torrent) fileMightBePartial(fileIndex int) bool {
+       f := t.getFile(fileIndex)
+       beginPieceIndex := f.BeginPieceIndex()
+       endPieceIndex := f.EndPieceIndex()
+       if t.dirtyChunks.IntersectsWithInterval(
+               uint64(t.pieceRequestIndexBegin(beginPieceIndex)),
+               uint64(t.pieceRequestIndexBegin(endPieceIndex)),
+       ) {
+               // We have dirty chunks. Even if the file is complete, this could mean a partial file has
+               // been started.
+               return true
+       }
+       var r roaring.Bitmap
+       r.AddRange(uint64(beginPieceIndex), uint64(endPieceIndex))
+       switch t._completedPieces.AndCardinality(&r) {
+       case 0, uint64(endPieceIndex - beginPieceIndex):
+               // We have either no pieces or all pieces and no dirty chunks.
+               return false
+       default:
+               // We're somewhere in-between.
+               return true
+       }
+}
index 5d50f949c17ae1c6c3e61f6b27de8a5d91dc12dc..6cd886554c53e81b465b82ca65986eccc5dcc0f1 100644 (file)
@@ -99,11 +99,15 @@ func (cl *Client) updateWebseedRequests() {
        aprioriHeap := heap.InterfaceForSlice(
                &heapSlice,
                func(l heapElem, r heapElem) bool {
-                       // Prefer the highest priority, then existing requests, then largest files.
                        return cmp.Or(
+                               // Prefer highest priority
                                -cmp.Compare(l.priority, r.priority),
-                               // Existing requests are assigned the priority of the piece they're reading next.
+                               // Then existing requests
                                compareBool(l.existingWebseedRequest == nil, r.existingWebseedRequest == nil),
+                               // Prefer not competing with active peer connections.
+                               compareBool(len(l.t.conns) > 0, len(r.t.conns) > 0),
+                               // Try to complete partial files first.
+                               -compareBool(l.t.fileMightBePartial(l.fileIndex), r.t.fileMightBePartial(r.fileIndex)),
                                // Note this isn't correct if the starting piece is split across multiple files. But
                                // I plan to refactor to key on starting piece to handle this case.
                                -cmp.Compare(
@@ -351,7 +355,7 @@ func (cl *Client) scheduleImmediateWebseedRequestUpdate() {
        }
        // Set the timer to fire right away (this will coalesce consecutive updates without forcing an
        // update on every call to this method). Since we're holding the Client lock, and we cancelled
-       // the timer and it wasn't active, nobody else should have reset it before us.
+       // the timer, and it wasn't active, nobody else should have reset it before us.
        panicif.True(cl.webseedRequestTimer.Reset(0))
 }