torrent.Add("webseed request error count", 1)
// This used to occur only on webseed.ErrTooFast but I think it makes sense to slow down any
// kind of error. Pausing here will starve the available requester slots which slows things
- // down. TODO: I don't think this will help anymore. Need to register a reduced concurrency
- // available for a host/cost key.
+ // down.
select {
case <-ws.peer.closed.Done():
case <-time.After(time.Duration(rand.Int63n(int64(10 * time.Second)))):
if err != nil {
ws.peer.onNeedUpdateRequests("webseedPeer request errored")
}
- ws.peer.t.cl.updateWebSeedRequests("webseedPeer request completed")
+ ws.peer.t.cl.updateWebseedRequestsWithReason("webseedPeer request completed")
locker.Unlock()
}
value.existingWebseedRequest.Cancel()
}
- for _, requestKeys := range plan.byCost {
+ for costKey, requestKeys := range plan.byCost {
for _, requestKey := range requestKeys {
+ // We may already be over the limit for this cost key if a cancelled request hasn't yet
+ // removed itself from the active list. Breaking here also provides backpressure, since
+ // active requests can sleep to rate limit.
+ if !cl.underWebSeedHttpRequestLimit(costKey) {
+ break
+ }
if g.MapContains(existingRequests, requestKey) {
continue
}
}
-func (cl *Client) updateWebSeedRequests(reason updateRequestReason) {
+func (cl *Client) updateWebseedRequestsWithReason(reason updateRequestReason) {
+ // TODO: Consider wrapping this call with pprof labels (e.g. the update reason).
cl.updateWebseedRequests()
}
func (cl *Client) updateWebseedRequests() {
cl.globalUpdateWebSeedRequests()
- // Should have already run to get here.
cl.webseedRequestTimer.Reset(webseedRequestUpdateTimerInterval)
}