func (me *Peer) cancel(r RequestIndex) bool {
if me.deleteRequest(r) {
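+ // If that was this peer's last outstanding request, trigger a request update.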
+ if me.actualRequestState.Requests.GetCardinality() == 0 {
+ me.updateRequests("Peer.cancel")
+ }
return me.peerImpl._cancel(me.t.requestIndexToRequest(r))
}
return true
}

func (c *Peer) remoteRejectedRequest(r RequestIndex) {
if c.deleteRequest(r) {
+ if c.actualRequestState.Requests.GetCardinality() == 0 {
+ c.updateRequests("Peer.remoteRejectedRequest")
+ }
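+ // A rejected request will never be satisfied, so stop expecting a chunk for it.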
c.decExpectedChunkReceive(r)
}
}
if deletedRequest {
c.piecesReceivedSinceLastRequestUpdate++
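+ // As in Peer.cancel: only refresh requests once nothing is left outstanding for this peer.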
if c.actualRequestState.Requests.GetCardinality() == 0 {
- c.updateRequests("piece")
+ c.updateRequests("Peer.receiveChunk deleted request")
}
c.allStats(add(int64(len(msg.Piece)), func(cs *ConnStats) *Count { return &cs.BytesReadUsefulIntendedData }))
}
// Necessary to pass TestReceiveChunkStorageFailureSeederFastExtensionDisabled. I think a
// request update runs while we're writing the chunk that just failed. Then we never do a
// fresh update after pending the failed request.
- c.updateRequests("write chunk error")
+ c.updateRequests("Peer.receiveChunk error writing chunk")
t.onWriteChunkErr(err)
return nil
}