return nil
}
-func (p *Peer) initUpdateRequestsTimer() {
+func (p *PeerConn) initUpdateRequestsTimer() {
if check.Enabled {
if p.updateRequestsTimer != nil {
panic(p.updateRequestsTimer)
const peerUpdateRequestsTimerReason = "updateRequestsTimer"
-func (c *Peer) updateRequestsTimerFunc() {
+func (c *PeerConn) updateRequestsTimerFunc() {
c.locker().Lock()
defer c.locker().Unlock()
if c.closed.IsSet() {
}
c = &PeerConn{
Peer: Peer{
- outgoing: opts.outgoing,
- choking: true,
- peerChoking: true,
- PeerMaxRequests: 250,
+ outgoing: opts.outgoing,
+ choking: true,
+ peerChoking: true,
RemoteAddr: opts.remoteAddr,
localPublicAddr: opts.localPublicAddr,
Network: opts.network,
callbacks: &cl.config.Callbacks,
},
- connString: opts.connString,
- conn: nc,
+ PeerMaxRequests: 250,
+ connString: opts.connString,
+ conn: nc,
}
c.peerRequestDataAllocLimiter.Max = int64(cl.config.MaxAllocPeerRequestDataPerConn)
c.initRequestState()
// with legacy PeerConn methods. New methods and calls that are fixed up should be migrated over to
// newHotPeerImpl.
type legacyPeerImpl interface {
+	// Whether the peer should be told to update requests. Sometimes this is skipped for
+	// high-priority adjustments to requests. This is really only relevant to PeerConn but
+	// hasn't been fully migrated over yet.
+ isLowOnRequests() bool
	// Notify that the peer's requests should be updated for the provided reason.
onNeedUpdateRequests(reason updateRequestReason)
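
// Illustrative sketch, not part of this diff: callers typically gate the notification on
// isLowOnRequests so peers that still have plenty of outstanding requests are left alone,
// mirroring the iterPeers loop in webseedPeer.onClose further below. The helper name
// notifyPeersLowOnRequests is hypothetical.
func notifyPeersLowOnRequests(t *Torrent, reason updateRequestReason) {
	t.iterPeers(func(p *Peer) {
		if p.isLowOnRequests() {
			p.onNeedUpdateRequests(reason)
		}
	})
}
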
peerMinPieces pieceIndex
peerAllowedFast typedRoaring.Bitmap[pieceIndex]
-
- PeerMaxRequests maxRequests // Maximum pending requests the peer allows.
}
PeerSource string
type peerLocalPublicAddr = IpPort
-func (p *Peer) isLowOnRequests() bool {
+func (p *PeerConn) isLowOnRequests() bool {
return p.requestState.Requests.IsEmpty() && p.requestState.Cancelled.IsEmpty()
}
	// and implementation differences, we may receive chunks that are no longer in the set of
	// requests we actually want. This could use a roaring.BSI if the memory use becomes noticeable.
validReceiveChunks map[RequestIndex]int
+ PeerMaxRequests maxRequests // Maximum pending requests the peer allows.
// Move to PeerConn?
protocolLogger log.Logger
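
// Illustrative sketch, not part of this diff: validReceiveChunks counts, per request index,
// how many chunk messages we are still prepared to accept, so a chunk arriving after its
// request was cancelled can be classified as unwanted instead of corrupting bookkeeping.
// expectChunk and receivedChunk are hypothetical helper names.
func expectChunk(valid map[RequestIndex]int, r RequestIndex) {
	valid[r]++
}

func receivedChunk(valid map[RequestIndex]int, r RequestIndex) (wanted bool) {
	if valid[r] <= 0 {
		return false
	}
	valid[r]--
	return true
}
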
},
hostKey: t.deriveWebSeedHostKey(url),
}
- ws.peer.initRequestState()
for _, opt := range opts {
opt(&ws.client)
}
}
}
g.MakeMapWithCap(&ws.activeRequests, ws.client.MaxRequests)
- // TODO: Implement an algorithm that assigns this based on sharing chunks across peers. For now
- // we just allow 2 MiB worth of requests. See newHotPeerImpl.nominalMaxRequests.
- ws.peer.PeerMaxRequests = maxRequests(intCeilDiv(8<<20, ws.peer.t.chunkSize.Uint32()))
- ws.peer.initUpdateRequestsTimer()
ws.locker = t.cl.locker()
for _, f := range t.callbacks().NewPeer {
f(&ws.peer)
hostKey webseedHostKeyHandle
}
+func (me *webseedPeer) isLowOnRequests() bool {
+	// Webseed request updates happen globally instead.
+ return false
+}
+
// Webseed requests are issued globally, so per-connection reasons and handling make no sense.
func (me *webseedPeer) onNeedUpdateRequests(updateRequestReason) {}
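
// Illustrative sketch, not part of this diff: with the no-op hooks above, webseedPeer still
// satisfies legacyPeerImpl while opting out of per-connection request updates; a compile-time
// assertion like this makes that intent explicit (assuming webseedPeer implements the
// interface's remaining methods).
var _ legacyPeerImpl = (*webseedPeer)(nil)
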
}
func (ws *webseedPeer) onClose() {
- // Just deleting them means we would have to manually cancel active requests.
- ws.peer.cancelAllRequests()
ws.peer.t.iterPeers(func(p *Peer) {
if p.isLowOnRequests() {
p.onNeedUpdateRequests("webseedPeer.onClose")