"net"
"net/http"
"net/netip"
+ "runtime"
"slices"
"strconv"
"time"
upnpMappings []*upnpMapping
webseedRequestTimer *time.Timer
+
+ activePieceHashers int
}
type ipStr string
}
func (cl *Client) torrentsAsSlice() (ret []*Torrent) {
+ ret = make([]*Torrent, 0, len(cl.torrents))
for t := range cl.torrents {
ret = append(ret, t)
}
}
return nil
}
+
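+// The maximum number of piece hashers the Client will run at once across all torrents.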
+func (cl *Client) maxActivePieceHashers() int {
+ return runtime.NumCPU()
+}
+
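+// Whether the number of active piece hashers is below the Client-wide limit.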
+func (cl *Client) belowMaxActivePieceHashers() bool {
+ return cl.activePieceHashers < cl.maxActivePieceHashers()
+}
+
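+// Whether the Client will permit another piece hasher to start right now.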
+func (cl *Client) canStartPieceHashers() bool {
+ return cl.belowMaxActivePieceHashers()
+}
+
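+// Starts piece hashers on torrents with pieces queued for hashing, until the Client-wide limit is
+// reached.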
+func (cl *Client) startPieceHashers() {
+ if !cl.canStartPieceHashers() {
+ return
+ }
+ ts := make([]*Torrent, 0, len(cl.torrents))
+ for t := range cl.torrents {
+ if !t.considerStartingHashers() {
+ continue
+ }
+ ts = append(ts, t)
+ }
+ // Sort the largest torrents first, as webseeds prefer those, and starting them first causes less thrashing.
+ slices.SortFunc(ts, func(a, b *Torrent) int {
+ return -cmp.Compare(a.length(), b.length())
+ })
+ for _, t := range ts {
+ t.startPieceHashers()
+ if !cl.canStartPieceHashers() {
+ break
+ }
+ }
+}
}
func (t *Torrent) pieceHashed(piece pieceIndex, passed bool, hashIoErr error) {
- t.logger.LazyLog(log.Debug, func() log.Msg {
- return log.Fstr("hashed piece %d (passed=%t)", piece, passed)
- })
p := t.piece(piece)
p.numVerifies++
p.numVerifiesCond.Broadcast()
})
}
+// Torrent piece hashers are sticky and will try to keep hashing pieces in the same Torrent to keep
+// the storage hot.
func (t *Torrent) startPieceHashers() error {
if t.closed.IsSet() {
return errTorrentClosed
}
- for t.startPieceHasher() {
+ for t.considerStartingHashers() {
+ if !t.startSinglePieceHasher() {
+ break
+ }
}
return nil
}
-func (t *Torrent) startPieceHasher() bool {
- if t.storage == nil {
- return false
- }
- if t.activePieceHashes >= t.cl.config.PieceHashersPerTorrent {
- return false
- }
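+// Starts a hasher for the next piece queued for hashing, if there is one. Returns whether a
+// hasher was started.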
+func (t *Torrent) startSinglePieceHasher() bool {
pi := t.getPieceToHash()
- if pi.Ok {
- t.startHash(pi.Value)
- go t.pieceHasher(pi.Value)
- return true
+ if !pi.Ok {
+ return false
}
- return false
+ t.startHash(pi.Value)
+ go t.pieceHasher(pi.Value)
+ return true
}
+// Sticky to a single Torrent: might as well, since that keeps the storage hot.
func (t *Torrent) pieceHasher(initial pieceIndex) {
t.finishHash(initial)
for {
t.cl.unlock()
t.finishHash(pi)
}
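+ // This hasher is exiting: let the Client start hashers elsewhere if there's now capacity.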
+ t.cl.startPieceHashers()
t.cl.unlock()
}
t.deferUpdateComplete()
p.hashing = true
t.deferPublishPieceStateChange(pi)
- t.updatePiecePriority(pi, "Torrent.startPieceHasher")
+ t.updatePiecePriority(pi, "Torrent.startHash")
t.storageLock.RLock()
t.activePieceHashes++
+ t.cl.activePieceHashers++
}
func (t *Torrent) getPieceToHash() (_ g.Option[pieceIndex]) {
t.pieceHashed(index, correct, copyErr)
t.updatePiecePriority(index, "Torrent.finishHash")
t.activePieceHashes--
+ t.cl.activePieceHashers--
}
// Return the connections that touched a piece, and clear the entries while doing it.
}
return nil
}
+
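+// Whether this Torrent should start more piece hashers: it needs storage, pieces queued for
+// hashing, and headroom under both the per-torrent and Client-wide hasher limits.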
+func (t *Torrent) considerStartingHashers() bool {
+ if t.storage == nil {
+ return false
+ }
+ if t.activePieceHashes >= t.cl.config.PieceHashersPerTorrent {
+ return false
+ }
+ if !t.cl.canStartPieceHashers() {
+ return false
+ }
+ if t.piecesQueuedForHash.IsEmpty() {
+ return false
+ }
+ return true
+}