From: Matt Joiner
Date: Thu, 20 May 2021 01:26:08 +0000 (+1000)
Subject: Don't always force piece reallocation
X-Git-Tag: v1.29.0~31^2~19
X-Git-Url: http://www.git.stargrave.org/?a=commitdiff_plain;h=1a27e1439130bbd6586a9bdb2e47cf6ee513b268;p=btrtrc.git

Don't always force piece reallocation

The balancing for unforced pieces may not be exactly what we want yet.
---
diff --git a/request-strategy/order.go b/request-strategy/order.go
index 54bb5285..2ac04ab4 100644
--- a/request-strategy/order.go
+++ b/request-strategy/order.go
@@ -74,6 +74,7 @@ func (me *peersForPieceRequests) addNextRequest(r Request) {
 type requestablePiece struct {
 	index             pieceIndex
 	t                 Torrent
+	alwaysReallocate  bool
 	NumPendingChunks  int
 	IterPendingChunks ChunksIter
 }
@@ -137,6 +138,7 @@ func getRequestablePieces(input Input) (ret []requestablePiece) {
 			t:                 piece.t.Torrent,
 			NumPendingChunks:  piece.NumPendingChunks,
 			IterPendingChunks: piece.iterPendingChunksWrapper,
+			alwaysReallocate:  piece.Priority >= types.PiecePriorityNext,
 		})
 	}
 	return
@@ -201,23 +203,42 @@ func allocatePendingChunks(p requestablePiece, peers []*requestsPeer) {
 			}
 		}
 	}()
-	sortPeersForPiece := func(byHasRequest *Request) {
+	sortPeersForPiece := func(req *Request) {
 		sort.Slice(peersForPiece, func(i, j int) bool {
-			ml := multiless.New().Int(
-				peersForPiece[i].requestsInPiece,
-				peersForPiece[j].requestsInPiece,
-			).Int(
+			byHasRequest := func() multiless.Computation {
+				ml := multiless.New()
+				if req != nil {
+					_, iHas := peersForPiece[i].nextState.Requests[*req]
+					_, jHas := peersForPiece[j].nextState.Requests[*req]
+					ml = ml.Bool(jHas, iHas)
+				}
+				return ml
+			}()
+			ml := multiless.New()
+			// We always "reallocate", that is, force even striping amongst peers that are either on
+			// the last piece they can contribute to, or for pieces marked for this behaviour.
+			// Striping prevents starving peers of requests, and will always re-balance to the
+			// fastest known peers.
+			if !p.alwaysReallocate {
+				ml = ml.Bool(
+					peersForPiece[j].requestablePiecesRemaining == 1,
+					peersForPiece[i].requestablePiecesRemaining == 1)
+			}
+			if p.alwaysReallocate || peersForPiece[j].requestablePiecesRemaining == 1 {
+				ml = ml.Int(
+					peersForPiece[i].requestsInPiece,
+					peersForPiece[j].requestsInPiece)
+			} else {
+				ml = ml.AndThen(byHasRequest)
+			}
+			ml = ml.Int(
 				peersForPiece[i].requestablePiecesRemaining,
 				peersForPiece[j].requestablePiecesRemaining,
 			).Float64(
 				peersForPiece[j].DownloadRate,
 				peersForPiece[i].DownloadRate,
 			)
-			if byHasRequest != nil {
-				_, iHas := peersForPiece[i].nextState.Requests[*byHasRequest]
-				_, jHas := peersForPiece[j].nextState.Requests[*byHasRequest]
-				ml = ml.Bool(jHas, iHas)
-			}
+			ml = ml.AndThen(byHasRequest)
 			return ml.Int64(
 				int64(peersForPiece[j].Age), int64(peersForPiece[i].Age),
 				// TODO: Probably peer priority can come next
diff --git a/request-strategy/order_test.go b/request-strategy/order_test.go
index adc0c478..a448ac37 100644
--- a/request-strategy/order_test.go
+++ b/request-strategy/order_test.go
@@ -187,7 +187,9 @@ func TestDontStealUnnecessarily(t *testing.T) {
 	// Slower than the stealers, but has all requests already.
 	stealee := basePeer
 	stealee.DownloadRate = 1
-	keepReqs := requestSetFromSlice(r(0, 0), r(0, 1), r(0, 2))
+	keepReqs := requestSetFromSlice(
+		r(3, 2), r(3, 4), r(3, 6), r(3, 8),
+		r(4, 0), r(4, 1), r(4, 7), r(4, 8))
 	stealee.HasExistingRequest = func(r Request) bool {
 		_, ok := keepReqs[r]
 		return ok
@@ -197,12 +199,41 @@ func TestDontStealUnnecessarily(t *testing.T) {
 	firstStealer.Id = intPeerId(2)
 	secondStealer := basePeer
 	secondStealer.Id = intPeerId(3)
+	secondStealer.HasPiece = func(i pieceIndex) bool {
+		switch i {
+		case 1, 3:
+			return true
+		default:
+			return false
+		}
+	}
 	results := Run(Input{Torrents: []Torrent{{
-		Pieces: []Piece{{
-			Request:           true,
-			NumPendingChunks:  9,
-			IterPendingChunks: chunkIterRange(9),
-		}},
+		Pieces: []Piece{
+			{
+				Request:           true,
+				NumPendingChunks:  0,
+				IterPendingChunks: chunkIterRange(9),
+			},
+			{
+				Request:           true,
+				NumPendingChunks:  7,
+				IterPendingChunks: chunkIterRange(7),
+			},
+			{
+				Request:           true,
+				NumPendingChunks:  0,
+				IterPendingChunks: chunkIterRange(0),
+			},
+			{
+				Request:           true,
+				NumPendingChunks:  9,
+				IterPendingChunks: chunkIterRange(9),
+			},
+			{
+				Request:           true,
+				NumPendingChunks:  9,
+				IterPendingChunks: chunkIterRange(9),
+			}},
 		Peers: []Peer{
 			firstStealer,
 			stealee,
@@ -215,10 +246,10 @@ func TestDontStealUnnecessarily(t *testing.T) {
 		c.Check(results[p].Requests, qt.HasLen, l)
 		c.Check(results[p].Interested, qt.Equals, l > 0)
 	}
-	check(firstStealer.Id, 3)
-	check(secondStealer.Id, 3)
+	check(firstStealer.Id, 5)
+	check(secondStealer.Id, 7+9)
 	c.Check(results[stealee.Id], qt.ContentEquals, PeerNextRequestState{
 		Interested: true,
-		Requests:   requestSetFromSlice(r(4, 0), r(4, 1), r(4, 7), r(4, 8)),
+		Requests:   requestSetFromSlice(r(4, 0), r(4, 1), r(4, 7), r(4, 8)),
 	})
 }
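
For readers tracing the ordering change above, the standalone Go sketch below illustrates the precedence the rewritten sortPeersForPiece comparison encodes. It is an illustration only, not code from the patch: the peer struct, its fields, lessForPiece, and the values in main are hypothetical stand-ins (the real comparison is built with the multiless package and reads peersForPiece[i].nextState.Requests). The intent it mirrors: when reallocation is forced, requests are striped evenly by how many requests a peer already has in the piece; otherwise the peer that already holds the request keeps it, with remaining requestable pieces and download rate as later tie-breakers.

package main

import (
	"fmt"
	"sort"
)

// peer is a hypothetical, simplified stand-in for the per-peer state the real
// sortPeersForPiece consults; it is not a type from the patch.
type peer struct {
	name                       string
	requestsInPiece            int     // requests already allocated to this peer within the piece
	requestablePiecesRemaining int     // how many more requestable pieces this peer can still serve
	downloadRate               float64 // higher is faster
	hasRequest                 bool    // whether this peer already holds the request being allocated
}

// lessForPiece mirrors the precedence of the new comparison chain: when
// reallocation is not forced, peers on their last requestable piece go first;
// forced reallocation stripes by current request count; an unforced allocation
// prefers the peer that already holds the request. Remaining pieces and
// download rate break ties.
func lessForPiece(peers []peer, i, j int, alwaysReallocate bool) bool {
	a, b := peers[i], peers[j]
	if !alwaysReallocate {
		// Peers on their last requestable piece sort first so they are not starved.
		if (a.requestablePiecesRemaining == 1) != (b.requestablePiecesRemaining == 1) {
			return a.requestablePiecesRemaining == 1
		}
	}
	if alwaysReallocate || b.requestablePiecesRemaining == 1 {
		// Forced reallocation: stripe evenly by current request count.
		if a.requestsInPiece != b.requestsInPiece {
			return a.requestsInPiece < b.requestsInPiece
		}
	} else if a.hasRequest != b.hasRequest {
		// Unforced: prefer the peer that already holds this request.
		return a.hasRequest
	}
	if a.requestablePiecesRemaining != b.requestablePiecesRemaining {
		return a.requestablePiecesRemaining < b.requestablePiecesRemaining
	}
	return a.downloadRate > b.downloadRate
}

func main() {
	peers := []peer{
		{name: "stealer", requestsInPiece: 0, requestablePiecesRemaining: 3, downloadRate: 10},
		{name: "stealee", requestsInPiece: 4, requestablePiecesRemaining: 2, downloadRate: 1, hasRequest: true},
	}
	for _, force := range []bool{false, true} {
		sort.SliceStable(peers, func(i, j int) bool { return lessForPiece(peers, i, j, force) })
		fmt.Printf("alwaysReallocate=%v -> first candidate: %s\n", force, peers[0].name)
	}
}

With these example values the sketch puts the stealee first when alwaysReallocate is false (it already holds the request) and the faster stealer first when it is true (striping by requestsInPiece), which is the behaviour the updated TestDontStealUnnecessarily exercises.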