Export request_strategy.GetRequestablePieces
author    Matt Joiner <anacrolix@gmail.com>
          Sat, 18 Sep 2021 08:57:50 +0000 (18:57 +1000)
committer Matt Joiner <anacrolix@gmail.com>
          Sat, 18 Sep 2021 08:57:50 +0000 (18:57 +1000)
request-strategy/order.go
request-strategy/torrent.go
requesting.go

diff --git a/request-strategy/order.go b/request-strategy/order.go
index 8b4b67998d691fc506410d9fad4d17af4c26faa3..0a32ae9a40fa47ae8b2b85dc79a33682fd8383a0 100644
@@ -89,13 +89,13 @@ type filterPiece struct {
        *Piece
 }
 
-func getRequestablePieces(input Input) (ret []requestablePiece) {
+// Calls f with requestable pieces in order.
+func GetRequestablePieces(input Input, f func(t *Torrent, p *Piece, pieceIndex int)) {
        maxPieces := 0
        for i := range input.Torrents {
                maxPieces += len(input.Torrents[i].Pieces)
        }
        pieces := make([]filterPiece, 0, maxPieces)
-       ret = make([]requestablePiece, 0, maxPieces)
        // Storage capacity left for this run, keyed by the storage capacity pointer on the storage
        // TorrentImpl. A nil value means no capacity limit.
        storageLeft := make(map[storage.TorrentCapacity]*int64)
@@ -147,13 +147,7 @@ func getRequestablePieces(input Input) (ret []requestablePiece) {
                }
                piece.t.unverifiedBytes += piece.Length
                allTorrentsUnverifiedBytes += piece.Length
-               ret = append(ret, requestablePiece{
-                       index:             piece.index,
-                       t:                 piece.t.Torrent,
-                       NumPendingChunks:  piece.NumPendingChunks,
-                       IterPendingChunks: piece.iterPendingChunksWrapper,
-                       alwaysReallocate:  piece.Priority >= types.PiecePriorityNext,
-               })
+               f(piece.t.Torrent, piece.Piece, piece.index)
        }
        return
 }
@@ -165,7 +159,16 @@ type Input struct {
 
 // TODO: We could do metainfo requests here.
 func Run(input Input) map[PeerId]PeerNextRequestState {
-       requestPieces := getRequestablePieces(input)
+       var requestPieces []requestablePiece
+       GetRequestablePieces(input, func(t *Torrent, piece *Piece, pieceIndex int) {
+               requestPieces = append(requestPieces, requestablePiece{
+                       index:             pieceIndex,
+                       t:                 t,
+                       NumPendingChunks:  piece.NumPendingChunks,
+                       IterPendingChunks: piece.iterPendingChunksWrapper,
+                       alwaysReallocate:  piece.Priority >= types.PiecePriorityNext,
+               })
+       })
        torrents := input.Torrents
        allPeers := make(map[uintptr][]*requestsPeer, len(torrents))
        for _, t := range torrents {
diff --git a/request-strategy/torrent.go b/request-strategy/torrent.go
index c7ed3c60b8a0aec70a0e6cfb494cf29f3735a2e6..a31ec772aa9c125c9b7e85fc594c6bc6784d9a0f 100644
@@ -7,7 +7,8 @@ import (
 type Torrent struct {
        Pieces   []Piece
        Capacity storage.TorrentCapacity
-       Peers    []Peer // not closed.
+       // Unclosed Peers. Not necessary for getting requestable piece ordering.
+       Peers []Peer
        // Some value that's unique and stable between runs. Could even use the infohash?
        StableId uintptr
 
diff --git a/requesting.go b/requesting.go
index ea1fb8e844f5993c4ccaf47af17037f1dbd01247..7e42babacf5ccf087b28bcc70b067aab72eebe54 100644
@@ -39,7 +39,7 @@ func (cl *Client) tickleRequester() {
        cl.updateRequests.Broadcast()
 }
 
-func (cl *Client) doRequests() {
+func (cl *Client) getRequestStrategyInput() request_strategy.Input {
        ts := make([]request_strategy.Torrent, 0, len(cl.torrents))
        for _, t := range cl.torrents {
                rst := request_strategy.Torrent{
@@ -90,10 +90,14 @@ func (cl *Client) doRequests() {
                })
                ts = append(ts, rst)
        }
-       nextPeerStates := request_strategy.Run(request_strategy.Input{
+       return request_strategy.Input{
                Torrents:           ts,
                MaxUnverifiedBytes: cl.config.MaxUnverifiedBytes,
-       })
+       }
+}
+
+func (cl *Client) doRequests() {
+       nextPeerStates := request_strategy.Run(cl.getRequestStrategyInput())
        for p, state := range nextPeerStates {
                setPeerNextRequestState(p, state)
        }
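
With the Input construction factored out of doRequests, the same snapshot can feed both request_strategy.Run and the newly exported GetRequestablePieces. A hedged sketch of such a consumer inside the torrent package; the method name is hypothetical and client locking is elided.

func (cl *Client) logRequestablePieces() {
	// Hypothetical helper, not part of this commit. Assumes the standard
	// library "log" import alongside the request_strategy alias import.
	input := cl.getRequestStrategyInput()
	request_strategy.GetRequestablePieces(input, func(
		t *request_strategy.Torrent, _ *request_strategy.Piece, pieceIndex int,
	) {
		log.Printf("torrent %#x: piece %d is requestable", t.StableId, pieceIndex)
	})
}
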