}
func (p *Piece) ignoreForRequests() bool {
- return p.hashing || p.marking || !p.haveHash() || p.t.pieceComplete(p.index) || p.queuedForHash()
+ return p.hashing || p.marking || !p.haveHash() || p.t.pieceComplete(p.index) || p.queuedForHash() || p.t.dataDownloadDisallowed.IsSet()
}
// This is the priority adjusted for piece state such as completion, hashing, etc.
}
// Webseed requests are issued globally, so per-connection reasons or handling make no sense.
-func (me *webseedPeer) onNeedUpdateRequests(updateRequestReason) {}
+func (me *webseedPeer) onNeedUpdateRequests(updateRequestReason) {
+ me.peer.cl.scheduleImmediateWebseedRequestUpdate()
+}
func (me *webseedPeer) expectingChunks() bool {
return len(me.activeRequests) > 0
}
// Add remaining existing requests.
for key := range unusedExistingRequests {
+ // Don't reconsider existing requests that aren't wanted anymore.
+ if key.t.dataDownloadDisallowed.IsSet() {
+ continue
+ }
heapSlice = append(heapSlice, heapElem{key, existingRequests[key]})
}
aprioriHeap := heap.InterfaceForSlice(
// handling overhead. Need the value to avoid looking this up again.
costKey := elem.costKey
panicif.Zero(costKey)
+ if elem.existingWebseedRequest == nil {
+ // Existing requests might be within the allowed discard range.
+ panicif.Eq(elem.priority, PiecePriorityNone)
+ }
+ panicif.True(elem.t.dataDownloadDisallowed.IsSet())
+ panicif.True(elem.t.closed.IsSet())
if len(plan.byCost[costKey]) >= webseedHostRequestConcurrency {
continue
}
}
// Set the timer to fire right away (this will coalesce consecutive updates without forcing an
// update on every call to this method). Since we're holding the Client lock, and we cancelled
- // the timer, and it wasn't active, nobody else should have reset it before us.
+ // the timer, and it wasn't active, nobody else should have reset it before us. Do we need to
+ // introduce a "reason" field here (albeit at the Client level)?
panicif.True(cl.webseedRequestTimer.Reset(0))
}