Note that this breaks the backpressure on webseed responses again, and should be fixed shortly.
torrent.Add("chunks received due to allowed fast", 1)
}
- defer func() {
+ // TODO: This needs to happen immediately, to prevent cancels occurring asynchronously when we have
+ // actually already received the piece, while we have the Client unlocked to write the data out.
+ {
+ if _, ok := c.requests[req]; ok {
+ for _, f := range c.callbacks.ReceivedRequested {
+ f(PeerMessageEvent{c, msg})
+ }
+ }
// Request has been satisfied.
if c.deleteRequest(req) {
if c.expectingChunks() {
} else {
torrent.Add("chunks received unwanted", 1)
}
- }()
+ }
// Do we actually want this chunk?
if t.haveChunk(req) {
Network: "http",
reconciledHandshakeStats: true,
peerSentHaveAll: true,
- PeerMaxRequests: maxRequests,
- RemoteAddr: remoteAddrFromUrl(url),
+ // TODO: Raise this limit, and instead limit concurrent fetches.
+ PeerMaxRequests: maxRequests,
+ RemoteAddr: remoteAddrFromUrl(url),
},
client: webseed.Client{
+ // TODO: Investigate a MaxConnsPerHost in the transport for this, possibly in a global
+ // Client.
HttpClient: http.DefaultClient,
Url: url,
},
)
type webseedPeer struct {
- client webseed.Client
+ client webseed.Client
+ // TODO: Remove finished entries from this.
requests map[Request]webseed.Request
peer Peer
}