lastChunkSent time.Time
// Stuff controlled by the local peer.
- needRequestUpdate string
+ needRequestUpdate updateRequestReason
requestState request_strategy.PeerRequestState
updateRequestsTimer *time.Timer
lastRequestUpdate time.Time
}
peerRequests = orderedBitmap[RequestIndex]
+
+ updateRequestReason string
)
const (
PeerSourceDirect = "M"
)
+// These are grouped because we might vary update request behaviour depending on the reason. I'm not
+// sure it's right that multiple reasons can be triggered before an update runs and only the first
+// one counts. Possibly we should instead signal which behaviours are appropriate in the next
+// update.
+const (
+ peerUpdateRequestsPeerCancelReason updateRequestReason = "Peer.cancel"
+ peerUpdateRequestsRemoteRejectReason updateRequestReason = "Peer.remoteRejectedRequest"
+)
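To illustrate the first-reason-wins behaviour the comment above worries about, here is a standalone sketch; the peer struct below is a stand-in holding only the relevant field, not the real Peer type, and the real updateRequests goes on to schedule the actual update.

package main

import "fmt"

type updateRequestReason string

const (
	peerUpdateRequestsPeerCancelReason   updateRequestReason = "Peer.cancel"
	peerUpdateRequestsRemoteRejectReason updateRequestReason = "Peer.remoteRejectedRequest"
)

// peer models only the field that gates request updates.
type peer struct {
	needRequestUpdate updateRequestReason
}

func (p *peer) updateRequests(reason updateRequestReason) {
	if p.needRequestUpdate != "" {
		// A reason is already pending, so this one is dropped.
		return
	}
	p.needRequestUpdate = reason
}

func main() {
	var p peer
	p.updateRequests(peerUpdateRequestsPeerCancelReason)
	p.updateRequests(peerUpdateRequestsRemoteRejectReason) // dropped: only the first reason counts
	fmt.Println(p.needRequestUpdate)                       // Peer.cancel
}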
+
// Returns the Torrent a Peer belongs to. Shouldn't change for the lifetime of the Peer. May be nil
// if we are the receiving end of a connection and the handshake hasn't been received or accepted
// yet.
return cn.peerImpl._request(ppReq), nil
}
-var peerUpdateRequestsPeerCancelReason = "Peer.cancel"
-
func (me *Peer) cancel(r RequestIndex) {
if !me.deleteRequest(r) {
panic("request not existing should have been guarded")
}
// Sets a reason to update requests and, if there wasn't already one pending, triggers the update.
-func (cn *Peer) updateRequests(reason string) {
+func (cn *Peer) updateRequests(reason updateRequestReason) {
if cn.needRequestUpdate != "" {
return
}
}
}
-var peerUpdateRequestsRemoteRejectReason = "Peer.remoteRejectedRequest"
-
// Returns true if it was valid to reject the request.
func (c *Peer) remoteRejectedRequest(r RequestIndex) bool {
if c.deleteRequest(r) {
return true
}
-func (c *Peer) deleteAllRequests(reason string) {
+func (c *Peer) deleteAllRequests(reason updateRequestReason) {
if c.requestState.Requests.IsEmpty() {
return
}
}
pprof.Do(
context.Background(),
- pprof.Labels("update request", p.needRequestUpdate),
+ pprof.Labels("update request", string(p.needRequestUpdate)),
func(_ context.Context) {
next := p.getDesiredRequestState()
p.applyRequestState(next)
t.cl.unlock()
}
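The explicit conversion above is needed because runtime/pprof's Labels takes plain strings, and a defined string type like updateRequestReason does not convert implicitly. A minimal standalone illustration of the same pattern:

package main

import (
	"context"
	"runtime/pprof"
)

type updateRequestReason string

func main() {
	reason := updateRequestReason("Peer.cancel")
	pprof.Do(
		context.Background(),
		// pprof.Labels(args ...string) requires string values, so the defined type is converted explicitly.
		pprof.Labels("update request", string(reason)),
		func(_ context.Context) {
			// work performed under the label
		},
	)
}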
-func (t *Torrent) cancelPiecesLocked(begin, end pieceIndex, reason string) {
+func (t *Torrent) cancelPiecesLocked(begin, end pieceIndex, reason updateRequestReason) {
for i := begin; i < end; i++ {
p := &t.pieces[i]
if p.priority == PiecePriorityNone {
t.openNewConns()
}
-func (t *Torrent) onPiecePendingTriggers(piece pieceIndex, reason string) {
+func (t *Torrent) onPiecePendingTriggers(piece pieceIndex, reason updateRequestReason) {
if t._pendingPieces.Contains(uint32(piece)) {
t.iterPeers(func(c *Peer) {
// if c.requestState.Interested {
}
}
-func (t *Torrent) updatePiecePriority(piece pieceIndex, reason string) {
+func (t *Torrent) updatePiecePriority(piece pieceIndex, reason updateRequestReason) {
if t.updatePiecePriorityNoTriggers(piece) && !t.disableTriggers {
t.onPiecePendingTriggers(piece, reason)
}
t.updatePieceRequestOrderPiece(piece)
}
-func (t *Torrent) updateAllPiecePriorities(reason string) {
+func (t *Torrent) updateAllPiecePriorities(reason updateRequestReason) {
t.updatePiecePriorities(0, t.numPieces(), reason)
}
// Update all piece priorities in one hit. This function should have the same
// output as updatePiecePriority, but across all pieces.
-func (t *Torrent) updatePiecePriorities(begin, end pieceIndex, reason string) {
+func (t *Torrent) updatePiecePriorities(begin, end pieceIndex, reason updateRequestReason) {
for i := begin; i < end; i++ {
t.updatePiecePriority(i, reason)
}
t.piece(t.pieceIndexOfRequestIndex(req)).pendChunkIndex(req % t.chunksPerRegularPiece())
}
-func (t *Torrent) pieceCompletionChanged(piece pieceIndex, reason string) {
+func (t *Torrent) pieceCompletionChanged(piece pieceIndex, reason updateRequestReason) {
t.cl.event.Broadcast()
if t.pieceComplete(piece) {
t.onPieceCompleted(piece)