data []byte
}
-type peer struct {
+type Peer struct {
// First to ensure 64-bit alignment for atomics. See #262.
_stats ConnStats
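// A minimal sketch of the alignment concern above (assumed context, not part
// of this change): sync/atomic 64-bit operations can fault on 32-bit
// platforms unless the operand is 64-bit aligned, and the first word of an
// allocated struct is guaranteed to be, e.g.:
//
//	type counters struct {
//		n  int64 // first field, so atomic.AddInt64(&c.n, 1) is safe on 386/ARM
//		mu sync.Mutex
//	}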
// Maintains the state of a BitTorrent-protocol based connection with a peer.
type PeerConn struct {
- peer
+ Peer
// A string that should identify the PeerConn's net.Conn endpoints. The net.Conn could
// be wrapping WebRTC, uTP, TCP, etc. Used in writing the conn status for peers.
return fmt.Sprintf("%+-55q %s %s", cn.PeerID, cn.PeerExtensionBytes, cn.connString)
}
-func (cn *peer) updateExpectingChunks() {
+func (cn *Peer) updateExpectingChunks() {
if cn.expectingChunks() {
if cn.lastStartedExpectingToReceiveChunks.IsZero() {
cn.lastStartedExpectingToReceiveChunks = time.Now()
}
}
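// Assumed protocol context for the predicate below (a gloss, not new
// behavior): a remote only services our requests while we have declared
// interest and it is not choking us, so both flags must hold before chunks
// can be expected.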
-func (cn *peer) expectingChunks() bool {
+func (cn *Peer) expectingChunks() bool {
return cn.interested && !cn.peerChoking
}
return ml.FinalOk()
}
-func (cn *peer) cumInterest() time.Duration {
+func (cn *Peer) cumInterest() time.Duration {
ret := cn.priorInterest
if cn.interested {
ret += time.Since(cn.lastBecameInterested)
}
return ret
}
-func (cn *peer) peerHasAllPieces() (all bool, known bool) {
+func (cn *Peer) peerHasAllPieces() (all bool, known bool) {
if cn.peerSentHaveAll {
return true, true
}
return cn.t.cl.locker()
}
-func (cn *peer) supportsExtension(ext pp.ExtensionName) bool {
+func (cn *Peer) supportsExtension(ext pp.ExtensionName) bool {
_, ok := cn.PeerExtensionIDs[ext]
return ok
}
// The best guess at the number of pieces in the torrent for this peer.
-func (cn *peer) bestPeerNumPieces() pieceIndex {
+func (cn *Peer) bestPeerNumPieces() pieceIndex {
if cn.t.haveInfo() {
return cn.t.numPieces()
}
return cn.peerMinPieces
}
-func (cn *peer) completedString() string {
+func (cn *Peer) completedString() string {
have := pieceIndex(cn._peerPieces.Len())
if cn.peerSentHaveAll {
have = cn.bestPeerNumPieces()
}
// Inspired by https://github.com/transmission/transmission/wiki/Peer-Status-Text.
-func (cn *peer) statusFlags() (ret string) {
+func (cn *Peer) statusFlags() (ret string) {
c := func(b byte) {
ret += string([]byte{b})
}
// return buf.String()
// }
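// Note on the rate below: the denominator is cumInterest, the time spent
// interested in this peer, not wall-clock connection time, so idle periods
// don't dilute the figure. A hypothetical reading:
//
//	bytesPerInterestedSecond := cn.downloadRate()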
-func (cn *peer) downloadRate() float64 {
+func (cn *Peer) downloadRate() float64 {
return float64(cn._stats.BytesReadUsefulData.Int64()) / cn.cumInterest().Seconds()
}
-func (cn *peer) writeStatus(w io.Writer, t *Torrent) {
+func (cn *Peer) writeStatus(w io.Writer, t *Torrent) {
// \t isn't preserved in <pre> blocks?
fmt.Fprintln(w, cn.connStatusString())
fmt.Fprintf(w, " last msg: %s, connected: %s, last helpful: %s, itime: %s, etime: %s\n",
)
}
-func (cn *peer) close() {
+func (cn *Peer) close() {
if !cn.closed.Set() {
return
}
}
}
-func (cn *peer) peerHasPiece(piece pieceIndex) bool {
+func (cn *Peer) peerHasPiece(piece pieceIndex) bool {
return cn.peerSentHaveAll || cn._peerPieces.Contains(bitmap.BitIndex(piece))
}
}
// The actual value to use as the maximum number of outbound requests.
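// clamp is assumed (its definition isn't shown in this diff) to take
// (min, value, max), and the elided third argument supplies the request
// strategy's ceiling, e.g.:
//
//	clamp(1, int64(cn.PeerMaxRequests), strategyMax) // strategyMax is a hypothetical name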
-func (cn *peer) nominalMaxRequests() (ret int) {
+func (cn *Peer) nominalMaxRequests() (ret int) {
return int(clamp(
1,
int64(cn.PeerMaxRequests),
))
}
-func (cn *peer) totalExpectingTime() (ret time.Duration) {
+func (cn *Peer) totalExpectingTime() (ret time.Duration) {
ret = cn.cumulativeExpectedToReceiveChunks
if !cn.lastStartedExpectingToReceiveChunks.IsZero() {
ret += time.Since(cn.lastStartedExpectingToReceiveChunks)
}
return
}
-func (cn *peer) setInterested(interested bool) bool {
+func (cn *Peer) setInterested(interested bool) bool {
if cn.interested == interested {
return true
}
// are okay.
type messageWriter func(pp.Message) bool
-func (cn *peer) request(r request) bool {
+func (cn *Peer) request(r request) bool {
if _, ok := cn.requests[r]; ok {
panic("chunk already requested")
}
return me.write(makeCancelMessage(r))
}
-func (cn *peer) doRequestState() bool {
+func (cn *Peer) doRequestState() bool {
if !cn.t.networkingEnabled || cn.t.dataDownloadDisallowed {
if !cn.setInterested(false) {
return false
// conceivable that the best connection should do this, since it's least likely to waste our time if
// assigned to the highest priority pieces, and assigning this role to more than one would cause
// significant wasted bandwidth.
-func (cn *peer) shouldRequestWithoutBias() bool {
+func (cn *Peer) shouldRequestWithoutBias() bool {
return cn.t.requestStrategy.shouldRequestWithoutBias(cn.requestStrategyConnection())
}
-func (cn *peer) iterPendingPieces(f func(pieceIndex) bool) bool {
+func (cn *Peer) iterPendingPieces(f func(pieceIndex) bool) bool {
if !cn.t.haveInfo() {
return false
}
return cn.t.requestStrategy.iterPendingPieces(cn, f)
}
-func (cn *peer) iterPendingPiecesUntyped(f iter.Callback) {
+func (cn *Peer) iterPendingPiecesUntyped(f iter.Callback) {
cn.iterPendingPieces(func(i pieceIndex) bool { return f(i) })
}
-func (cn *peer) iterPendingRequests(piece pieceIndex, f func(request) bool) bool {
+func (cn *Peer) iterPendingRequests(piece pieceIndex, f func(request) bool) bool {
return cn.t.requestStrategy.iterUndirtiedChunks(
cn.t.piece(piece).requestStrategyPiece(),
func(cs chunkSpec) bool {
}
// TODO: check that callers call updateRequests.
-func (cn *peer) stopRequestingPiece(piece pieceIndex) bool {
+func (cn *Peer) stopRequestingPiece(piece pieceIndex) bool {
return cn._pieceRequestOrder.Remove(bitmap.BitIndex(piece))
}
// preference. Connection piece priority is specific to a connection and is
// used to pseudorandomly avoid connections always requesting the same pieces
// and thus wasting effort.
-func (cn *peer) updatePiecePriority(piece pieceIndex) bool {
+func (cn *Peer) updatePiecePriority(piece pieceIndex) bool {
tpp := cn.t.piecePriority(piece)
if !cn.peerHasPiece(piece) {
tpp = PiecePriorityNone
return cn._pieceRequestOrder.Set(bitmap.BitIndex(piece), prio) || cn.shouldRequestWithoutBias()
}
-func (cn *peer) getPieceInclination() []int {
+func (cn *Peer) getPieceInclination() []int {
if cn.pieceInclination == nil {
cn.pieceInclination = cn.t.getConnPieceInclination()
}
return cn.pieceInclination
}
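// A sketch of how such an inclination could be produced (hypothetical; the
// real source lives elsewhere in the package): a per-connection permutation
// of piece indices, giving each conn its own pseudorandom bias as described
// above updatePiecePriority:
//
//	inclination := rand.Perm(numPieces) // lower value => requested earlier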
-func (cn *peer) discardPieceInclination() {
+func (cn *Peer) discardPieceInclination() {
if cn.pieceInclination == nil {
return
}
cn.updateRequests()
}
}
- cn.t.maybeDropMutuallyCompletePeer(&cn.peer)
+ cn.t.maybeDropMutuallyCompletePeer(&cn.Peer)
}
func (cn *PeerConn) raisePeerMinPieces(newMin pieceIndex) {
}
cn.raisePeerMinPieces(piece + 1)
cn._peerPieces.Set(bitmap.BitIndex(piece), true)
- cn.t.maybeDropMutuallyCompletePeer(&cn.peer)
+ cn.t.maybeDropMutuallyCompletePeer(&cn.Peer)
if cn.updatePiecePriority(piece) {
cn.updateRequests()
}
// After handshake, we know what Torrent and Client stats to include for a
// connection.
-func (cn *peer) postHandshakeStats(f func(*ConnStats)) {
+func (cn *Peer) postHandshakeStats(f func(*ConnStats)) {
t := cn.t
f(&t.stats)
f(&t.cl.stats)
// All ConnStats that include this connection. Some objects are not known
// until the handshake is complete, after which it's expected to reconcile the
// differences.
-func (cn *peer) allStats(f func(*ConnStats)) {
+func (cn *Peer) allStats(f func(*ConnStats)) {
f(&cn._stats)
if cn.reconciledHandshakeStats {
cn.postHandshakeStats(f)
// Returns whether the connection could be useful to us. We're seeding and
// they want data, we don't have metainfo and they can provide it, etc.
-func (c *peer) useful() bool {
+func (c *Peer) useful() bool {
t := c.t
if c.closed.IsSet() {
return false
return false
}
-func (c *peer) lastHelpful() (ret time.Time) {
+func (c *Peer) lastHelpful() (ret time.Time) {
ret = c.lastUsefulChunkReceived
if c.t.seeding() && c.lastChunkSent.After(ret) {
ret = c.lastChunkSent
}
return
}
-func (c *peer) remoteRejectedRequest(r request) {
+func (c *Peer) remoteRejectedRequest(r request) {
if c.deleteRequest(r) {
c.decExpectedChunkReceive(r)
}
}
-func (c *peer) decExpectedChunkReceive(r request) {
+func (c *Peer) decExpectedChunkReceive(r request) {
count := c.validReceiveChunks[r]
if count == 1 {
delete(c.validReceiveChunks, r)
}
// Handle a received chunk from a peer.
-func (c *peer) receiveChunk(msg *pp.Message) error {
+func (c *Peer) receiveChunk(msg *pp.Message) error {
t := c.t
cl := t.cl
torrent.Add("chunks received", 1)
return nil
}
-func (c *peer) onDirtiedPiece(piece pieceIndex) {
+func (c *Peer) onDirtiedPiece(piece pieceIndex) {
if c.peerTouchedPieces == nil {
c.peerTouchedPieces = make(map[pieceIndex]struct{})
}
c.peerTouchedPieces[piece] = struct{}{}
ds := &c.t.pieces[piece].dirtiers
if *ds == nil {
- *ds = make(map[*peer]struct{})
+ *ds = make(map[*Peer]struct{})
}
(*ds)[c] = struct{}{}
}
cn.t.dropConnection(cn)
}
-func (cn *peer) netGoodPiecesDirtied() int64 {
+func (cn *Peer) netGoodPiecesDirtied() int64 {
return cn._stats.PiecesDirtiedGood.Int64() - cn._stats.PiecesDirtiedBad.Int64()
}
-func (c *peer) peerHasWantedPieces() bool {
+func (c *Peer) peerHasWantedPieces() bool {
return !c._pieceRequestOrder.IsEmpty()
}
-func (c *peer) numLocalRequests() int {
+func (c *Peer) numLocalRequests() int {
return len(c.requests)
}
-func (c *peer) deleteRequest(r request) bool {
+func (c *Peer) deleteRequest(r request) bool {
if _, ok := c.requests[r]; !ok {
return false
}
c.updateRequests()
}
// Give other conns a chance to pick up the request.
- c.t.iterPeers(func(_c *peer) {
+ c.t.iterPeers(func(_c *Peer) {
// We previously checked that the peer wasn't interested, so as to only wake connections that
// were unable to issue requests due to starvation by the request strategy. There could be
// performance ramifications.
return true
}
-func (c *peer) deleteAllRequests() {
+func (c *Peer) deleteAllRequests() {
for r := range c.requests {
c.deleteRequest(r)
}
c.writerCond.Broadcast()
}
-func (c *peer) postCancel(r request) bool {
+func (c *Peer) postCancel(r request) bool {
if !c.deleteRequest(r) {
return false
}
t.reconcileHandshakeStats(c)
}
-func (c *peer) peerPriority() (peerPriority, error) {
+func (c *Peer) peerPriority() (peerPriority, error) {
return bep40Priority(c.remoteIpPort(), c.t.cl.publicAddr(c.remoteIp()))
}
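// BEP 40 ("Canonical Peer Priority") derives a priority both endpoints can
// compute identically from their addresses, using crc32-c in the BEP; roughly:
//
//	prio := crc32.Checksum(mixedAddrBytes, crc32.MakeTable(crc32.Castagnoli)) // mixedAddrBytes: hypothetical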
-func (c *peer) remoteIp() net.IP {
+func (c *Peer) remoteIp() net.IP {
return addrIpOrNil(c.RemoteAddr)
}
-func (c *peer) remoteIpPort() IpPort {
+func (c *Peer) remoteIpPort() IpPort {
ipa, _ := tryIpPortFromNetAddr(c.RemoteAddr)
return IpPort{ipa.IP, uint16(ipa.Port)}
}
return fmt.Sprintf("connection %p", c)
}
-func (c *peer) trust() connectionTrust {
+func (c *Peer) trust() connectionTrust {
return connectionTrust{c.trusted, c.netGoodPiecesDirtied()}
}
return multiless.New().Bool(l.Implicit, r.Implicit).Int64(l.NetGoodPiecesDirted, r.NetGoodPiecesDirted).Less()
}
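// multiless chains comparisons lexicographically: Implicit decides first and
// NetGoodPiecesDirted only breaks ties, roughly equivalent to:
//
//	if l.Implicit != r.Implicit {
//		return !l.Implicit // assuming multiless sorts false before true
//	}
//	return l.NetGoodPiecesDirted < r.NetGoodPiecesDirted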
-func (cn *peer) requestStrategyConnection() requestStrategyConnection {
+func (cn *Peer) requestStrategyConnection() requestStrategyConnection {
return cn
}
-func (cn *peer) chunksReceivedWhileExpecting() int64 {
+func (cn *Peer) chunksReceivedWhileExpecting() int64 {
return cn._chunksReceivedWhileExpecting
}
-func (cn *peer) fastest() bool {
+func (cn *Peer) fastest() bool {
return cn == cn.t.fastestPeer
}
-func (cn *peer) peerMaxRequests() int {
+func (cn *Peer) peerMaxRequests() int {
return cn.PeerMaxRequests
}
return cn.peerPieces()
}
-func (cn *peer) peerPieces() bitmap.Bitmap {
+func (cn *Peer) peerPieces() bitmap.Bitmap {
ret := cn._peerPieces.Copy()
if cn.peerSentHaveAll {
ret.AddRange(0, cn.t.numPieces())
}
return ret
}
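// A peer that announced HaveAll (the fast extension) may never have sent a
// concrete bitfield, so the full piece range is synthesized into the copy
// rather than trusting _peerPieces alone.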
-func (cn *peer) pieceRequestOrder() *prioritybitmap.PriorityBitmap {
+func (cn *Peer) pieceRequestOrder() *prioritybitmap.PriorityBitmap {
return &cn._pieceRequestOrder
}
-func (cn *peer) stats() *ConnStats {
+func (cn *Peer) stats() *ConnStats {
return &cn._stats
}
-func (cn *peer) torrent() requestStrategyTorrent {
+func (cn *Peer) torrent() requestStrategyTorrent {
return cn.t.requestStrategyTorrent()
}
conn *PeerConn
f pp.PexPeerFlags
}{
- {&PeerConn{peer: peer{outgoing: false, PeerPrefersEncryption: false}}, 0},
- {&PeerConn{peer: peer{outgoing: false, PeerPrefersEncryption: true}}, pp.PexPrefersEncryption},
- {&PeerConn{peer: peer{outgoing: true, PeerPrefersEncryption: false}}, pp.PexOutgoingConn},
- {&PeerConn{peer: peer{outgoing: true, PeerPrefersEncryption: true}}, pp.PexOutgoingConn | pp.PexPrefersEncryption},
- {&PeerConn{peer: peer{RemoteAddr: udpAddr}}, pp.PexSupportsUtp},
- {&PeerConn{peer: peer{RemoteAddr: udpAddr, outgoing: true}}, pp.PexOutgoingConn | pp.PexSupportsUtp},
- {&PeerConn{peer: peer{RemoteAddr: tcpAddr, outgoing: true}}, pp.PexOutgoingConn},
- {&PeerConn{peer: peer{RemoteAddr: tcpAddr}}, 0},
+ {&PeerConn{Peer: Peer{outgoing: false, PeerPrefersEncryption: false}}, 0},
+ {&PeerConn{Peer: Peer{outgoing: false, PeerPrefersEncryption: true}}, pp.PexPrefersEncryption},
+ {&PeerConn{Peer: Peer{outgoing: true, PeerPrefersEncryption: false}}, pp.PexOutgoingConn},
+ {&PeerConn{Peer: Peer{outgoing: true, PeerPrefersEncryption: true}}, pp.PexOutgoingConn | pp.PexPrefersEncryption},
+ {&PeerConn{Peer: Peer{RemoteAddr: udpAddr}}, pp.PexSupportsUtp},
+ {&PeerConn{Peer: Peer{RemoteAddr: udpAddr, outgoing: true}}, pp.PexOutgoingConn | pp.PexSupportsUtp},
+ {&PeerConn{Peer: Peer{RemoteAddr: tcpAddr, outgoing: true}}, pp.PexOutgoingConn},
+ {&PeerConn{Peer: Peer{RemoteAddr: tcpAddr}}, 0},
}
for i, tc := range testcases {
f := tc.conn.pexPeerFlags()
}{
{
pexAdd,
- &PeerConn{peer: peer{RemoteAddr: udpAddr}},
+ &PeerConn{Peer: Peer{RemoteAddr: udpAddr}},
pexEvent{pexAdd, udpAddr, pp.PexSupportsUtp},
},
{
pexDrop,
- &PeerConn{peer: peer{RemoteAddr: tcpAddr, outgoing: true, PeerListenPort: dialTcpAddr.Port}},
+ &PeerConn{Peer: Peer{RemoteAddr: tcpAddr, outgoing: true, PeerListenPort: dialTcpAddr.Port}},
pexEvent{pexDrop, tcpAddr, pp.PexOutgoingConn},
},
{
pexAdd,
- &PeerConn{peer: peer{RemoteAddr: tcpAddr, PeerListenPort: dialTcpAddr.Port}},
+ &PeerConn{Peer: Peer{RemoteAddr: tcpAddr, PeerListenPort: dialTcpAddr.Port}},
pexEvent{pexAdd, dialTcpAddr, 0},
},
{
pexDrop,
- &PeerConn{peer: peer{RemoteAddr: udpAddr, PeerListenPort: dialUdpAddr.Port}},
+ &PeerConn{Peer: Peer{RemoteAddr: udpAddr, PeerListenPort: dialUdpAddr.Port}},
pexEvent{pexDrop, dialUdpAddr, pp.PexSupportsUtp},
},
}
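// Reading of the table above: the advertised address prefers the peer's
// listen port (dialTcpAddr/dialUdpAddr) when it's known, falling back to
// RemoteAddr, and flags are derived from the transport (uTP) and the
// connection's direction (outgoing).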
fileIndex segments.Index
files *[]*File
- webSeeds map[string]*peer
+ webSeeds map[string]*Peer
// Active peer connections, running message stream loops. TODO: Make this
// open (not-closed) connections only.
// Set of addrs to which we're attempting to connect. Connections are
// half-open until all handshakes are completed.
halfOpen map[string]PeerInfo
- fastestPeer *peer
+ fastestPeer *Peer
// Reserve of peers to connect to. A peer can be both here and in the
// active connections if we're told about the peer after connecting with
// This seems to be all the follow-up tasks after info is set that can't fail.
func (t *Torrent) onSetInfo() {
- t.iterPeers(func(p *peer) {
+ t.iterPeers(func(p *Peer) {
p.onGotInfo(t.info)
})
for i := range t.pieces {
func (t *Torrent) maybeDropMutuallyCompletePeer(
// I'm not sure about taking peer here, not all peer implementations actually drop. Maybe that's okay?
- p *peer,
+ p *Peer,
) {
if !t.cl.config.DropMutuallyCompletePeers {
return
func (t *Torrent) piecePriorityChanged(piece pieceIndex) {
// t.logger.Printf("piece %d priority changed", piece)
- t.iterPeers(func(c *peer) {
+ t.iterPeers(func(c *Peer) {
if c.updatePiecePriority(piece) {
// log.Print("conn piece priority changed")
c.updateRequests()
}
func (t *Torrent) numActivePeers() (num int) {
- t.iterPeers(func(*peer) {
+ t.iterPeers(func(*Peer) {
num++
})
return
oldMax = t.maxEstablishedConns
t.maxEstablishedConns = max
wcs := slices.HeapInterface(slices.FromMapKeys(t.conns), func(l, r *PeerConn) bool {
- return worseConn(&l.peer, &r.peer)
+ return worseConn(&l.Peer, &r.Peer)
})
for len(t.conns) > t.maxEstablishedConns && wcs.Len() > 0 {
t.dropConnection(wcs.Pop().(*PeerConn))
c.stats().incrementPiecesDirtiedBad()
}
- bannableTouchers := make([]*peer, 0, len(p.dirtiers))
+ bannableTouchers := make([]*Peer, 0, len(p.dirtiers))
for c := range p.dirtiers {
if !c.trusted {
bannableTouchers = append(bannableTouchers, c)
t.cancelRequestsForPiece(piece)
for conn := range t.conns {
conn.have(piece)
- t.maybeDropMutuallyCompletePeer(&conn.peer)
+ t.maybeDropMutuallyCompletePeer(&conn.Peer)
}
}
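// On piece completion every live conn is sent a HAVE, and, when
// DropMutuallyCompletePeers is configured, conns that now hold everything we
// hold can be shed, since neither side can help the other further.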
// c.drop()
// }
// }
- t.iterPeers(func(conn *peer) {
+ t.iterPeers(func(conn *Peer) {
if conn.peerHasPiece(piece) {
conn.updateRequests()
}
}
}
-func (t *Torrent) peersAsSlice() (ret []*peer) {
- t.iterPeers(func(p *peer) {
+func (t *Torrent) peersAsSlice() (ret []*Peer) {
+ t.iterPeers(func(p *Peer) {
ret = append(ret, p)
})
return
torrent.Add("request timeouts", 1)
cb.t.cl.lock()
defer cb.t.cl.unlock()
- cb.t.iterPeers(func(cn *peer) {
+ cb.t.iterPeers(func(cn *Peer) {
if cn.peerHasPiece(pieceIndex(r.Index)) {
cn.updateRequests()
}
func (t *Torrent) disallowDataDownloadLocked() {
t.dataDownloadDisallowed = true
- t.iterPeers(func(c *peer) {
+ t.iterPeers(func(c *Peer) {
c.updateRequests()
})
t.tickleReaders()
defer t.cl.unlock()
t.dataDownloadDisallowed = false
t.tickleReaders()
- t.iterPeers(func(c *peer) {
+ t.iterPeers(func(c *Peer) {
c.updateRequests()
})
}
t.userOnWriteChunkErr = f
}
-func (t *Torrent) iterPeers(f func(*peer)) {
+func (t *Torrent) iterPeers(f func(*Peer)) {
for pc := range t.conns {
- f(&pc.peer)
+ f(&pc.Peer)
}
for _, ws := range t.webSeeds {
f(ws)
}
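// iterPeers is the single traversal covering both BitTorrent conns (via their
// embedded Peer) and webseed peers. A hypothetical use, counting peers that
// have piece i:
//
//	var n int
//	t.iterPeers(func(p *Peer) {
//		if p.peerHasPiece(i) {
//			n++
//		}
//	})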
const maxRequests = 10
ws := webseedPeer{
- peer: peer{
+ peer: Peer{
t: t,
outgoing: true,
network: "http",
t.webSeeds[url] = &ws.peer
}
-func (t *Torrent) peerIsActive(p *peer) (active bool) {
- t.iterPeers(func(p1 *peer) {
+func (t *Torrent) peerIsActive(p *Peer) (active bool) {
+ t.iterPeers(func(p1 *Peer) {
if p1 == p {
active = true
}