"io"
"math/rand"
"net"
- "sort"
"strconv"
"strings"
"sync/atomic"
"time"
"github.com/RoaringBitmap/roaring"
+ "github.com/anacrolix/chansync"
+ . "github.com/anacrolix/generics"
"github.com/anacrolix/log"
"github.com/anacrolix/missinggo/iter"
"github.com/anacrolix/missinggo/v2/bitmap"
"github.com/anacrolix/multiless"
-
- "github.com/anacrolix/chansync"
"github.com/anacrolix/torrent/bencode"
"github.com/anacrolix/torrent/metainfo"
"github.com/anacrolix/torrent/mse"
pp "github.com/anacrolix/torrent/peer_protocol"
request_strategy "github.com/anacrolix/torrent/request-strategy"
+ "github.com/anacrolix/torrent/typed-roaring"
)
type PeerSource string
String() string
}
-// Since we have to store all the requests in memory, we can't reasonably exceed what would be
-// indexable with the memory space available.
type (
- maxRequests = int
- requestState = request_strategy.PeerRequestState
+ // Since we have to store all the requests in memory, we can't reasonably exceed what could be
+ // indexed with the memory space available.
+ maxRequests = int
)
type Peer struct {
peerImpl
callbacks *Callbacks
- outgoing bool
- Network string
- RemoteAddr PeerRemoteAddr
+ outgoing bool
+ Network string
+ RemoteAddr PeerRemoteAddr
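+ // An address for the peer that bans can apply to, if one can be determined from the connection.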
+ bannableAddr Option[bannableAddr]
// True if the connection is operating over MSE obfuscation.
headerEncrypted bool
cryptoMethod mse.CryptoMethod
// Stuff controlled by the local peer.
needRequestUpdate string
- requestState requestState
+ requestState request_strategy.PeerRequestState
updateRequestsTimer *time.Timer
+ lastRequestUpdate time.Time
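+ // The request count as of the last requests update, used to pace nominalMaxRequests.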
+ peakRequests maxRequests
lastBecameInterested time.Time
priorInterest time.Duration
peerMinPieces pieceIndex
// Pieces we've accepted chunks for from the peer.
peerTouchedPieces map[pieceIndex]struct{}
- peerAllowedFast roaring.Bitmap
+ peerAllowedFast typedRoaring.Bitmap[pieceIndex]
PeerMaxRequests maxRequests // Maximum pending requests the peer allows.
PeerExtensionIDs map[pp.ExtensionName]pp.ExtensionNumber
logger log.Logger
}
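+// Outstanding requests to a peer, tracked as an ordered bitmap of request indices.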
+type peerRequests = orderedBitmap[RequestIndex]
+
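+// Initialises the request state; must be called before the peer's requests are tracked.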
+func (p *Peer) initRequestState() {
+ p.requestState.Requests = &peerRequests{}
+}
+
// Maintains the state of a BitTorrent-protocol based connection with a peer.
type PeerConn struct {
Peer
return true
}
haveAllowedFastRequests := false
- cn.peerAllowedFast.Iterate(func(i uint32) bool {
- haveAllowedFastRequests = roaringBitmapRangeCardinality(
- &cn.requestState.Requests,
- cn.t.pieceRequestIndexOffset(pieceIndex(i)),
- cn.t.pieceRequestIndexOffset(pieceIndex(i+1)),
+ cn.peerAllowedFast.Iterate(func(i pieceIndex) bool {
+ haveAllowedFastRequests = roaringBitmapRangeCardinality[RequestIndex](
+ cn.requestState.Requests,
+ cn.t.pieceRequestIndexOffset(i),
+ cn.t.pieceRequestIndexOffset(i+1),
) == 0
return !haveAllowedFastRequests
})
}
func (cn *Peer) remoteChokingPiece(piece pieceIndex) bool {
- return cn.peerChoking && !cn.peerAllowedFast.Contains(bitmap.BitIndex(piece))
+ return cn.peerChoking && !cn.peerAllowedFast.Contains(piece)
}
// Returns true if the connection is over IPv6.
// Returns whether the left connection should be preferred over the right one,
// considering only their networking properties. If ok is false, we can't
// decide.
-func (l *PeerConn) hasPreferredNetworkOver(r *PeerConn) (left, ok bool) {
- var ml multiLess
- ml.NextBool(l.isPreferredDirection(), r.isPreferredDirection())
- ml.NextBool(!l.utp(), !r.utp())
- ml.NextBool(l.ipv6(), r.ipv6())
- return ml.FinalOk()
+func (l *PeerConn) hasPreferredNetworkOver(r *PeerConn) bool {
+ var ml multiless.Computation
+ ml = ml.Bool(r.isPreferredDirection(), l.isPreferredDirection())
+ ml = ml.Bool(l.utp(), r.utp())
+ ml = ml.Bool(r.ipv6(), l.ipv6())
+ return ml.Less()
}
func (cn *Peer) cumInterest() time.Duration {
return float64(num) / cn.totalExpectingTime().Seconds()
}
-func (cn *Peer) numRequestsByPiece() (ret map[pieceIndex]int) {
- ret = make(map[pieceIndex]int)
- cn.requestState.Requests.Iterate(func(x uint32) bool {
- ret[pieceIndex(x/cn.t.chunksPerRegularPiece())]++
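+// DownloadRate returns this peer's download rate in bytes per second, holding the client lock
+// while reading.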
+func (cn *Peer) DownloadRate() float64 {
+ cn.locker().Lock()
+ defer cn.locker().Unlock()
+
+ return cn.downloadRate()
+}
+
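+// Calls f once per contiguous run of outstanding requests that belong to the same piece, with the
+// piece index and the length of the run.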
+func (cn *Peer) iterContiguousPieceRequests(f func(piece pieceIndex, count int)) {
+ var last Option[pieceIndex]
+ var count int
+ next := func(item Option[pieceIndex]) {
+ if item == last {
+ count++
+ } else {
+ if count != 0 {
+ f(last.Value(), count)
+ }
+ last = item
+ count = 1
+ }
+ }
+ cn.requestState.Requests.Iterate(func(requestIndex request_strategy.RequestIndex) bool {
+ next(Some(cn.t.pieceIndexOfRequestIndex(requestIndex)))
return true
})
- return
+ next(None[pieceIndex]())
}
func (cn *Peer) writeStatus(w io.Writer, t *Torrent) {
cn.downloadRate()/(1<<10),
)
fmt.Fprintf(w, " requested pieces:")
- type pieceNumRequestsType struct {
- piece pieceIndex
- numRequests int
- }
- var pieceNumRequests []pieceNumRequestsType
- for piece, count := range cn.numRequestsByPiece() {
- pieceNumRequests = append(pieceNumRequests, pieceNumRequestsType{piece, count})
- }
- sort.Slice(pieceNumRequests, func(i, j int) bool {
- return pieceNumRequests[i].piece < pieceNumRequests[j].piece
+ cn.iterContiguousPieceRequests(func(piece pieceIndex, count int) {
+ fmt.Fprintf(w, " %v(%v)", piece, count)
})
- for _, elem := range pieceNumRequests {
- fmt.Fprintf(w, " %v(%v)", elem.piece, elem.numRequests)
- }
fmt.Fprintf(w, "\n")
}
// 64KiB, but temporarily less to work around an issue with WebRTC. TODO: Update when
// https://github.com/pion/datachannel/issues/59 is fixed.
-const writeBufferHighWaterLen = 1 << 15
+const (
+ writeBufferHighWaterLen = 1 << 15
+ writeBufferLowWaterLen = writeBufferHighWaterLen / 2
+)
// Writes a message into the write buffer. Returns whether it's okay to keep writing. Writing is
// done asynchronously, so it may be that we're not able to honour backpressure from this method.
return index < len(cn.metadataRequests) && cn.metadataRequests[index]
}
+var (
+ interestedMsgLen = len(pp.Message{Type: pp.Interested}.MustMarshalBinary())
+ requestMsgLen = len(pp.Message{Type: pp.Request}.MustMarshalBinary())
+ // This is the maximum request count that could fit in the write buffer if it's at or below the
+ // low water mark when we run maybeUpdateActualRequestState.
+ maxLocalToRemoteRequests = (writeBufferHighWaterLen - writeBufferLowWaterLen - interestedMsgLen) / requestMsgLen
+)
+
// The actual value to use as the maximum outbound requests.
-func (cn *Peer) nominalMaxRequests() (ret maxRequests) {
- return maxRequests(clamp(1, int64(cn.PeerMaxRequests), 2048))
+func (cn *Peer) nominalMaxRequests() maxRequests {
+ return maxInt(1, minInt(cn.PeerMaxRequests, cn.peakRequests*2, maxLocalToRemoteRequests))
}
func (cn *Peer) totalExpectingTime() (ret time.Duration) {
more = msg(pp.Message{
Type: pp.Choke,
})
- if cn.fastEnabled() {
- for r := range cn.peerRequests {
- // TODO: Don't reject pieces in allowed fast set.
- cn.reject(r)
- }
- } else {
+ if !cn.fastEnabled() {
cn.peerRequests = nil
}
return
// This function seems to only be used by Peer.request. It's all logic checks, so maybe we can no-op it
// when we want to go fast.
func (cn *Peer) shouldRequest(r RequestIndex) error {
- pi := pieceIndex(r / cn.t.chunksPerRegularPiece())
+ pi := cn.t.pieceIndexOfRequestIndex(r)
if cn.requestState.Cancelled.Contains(r) {
return errors.New("request is cancelled and waiting acknowledgement")
}
if cn.t.pieceQueuedForHash(pi) {
panic("piece is queued for hash")
}
- if cn.peerChoking && !cn.peerAllowedFast.Contains(bitmap.BitIndex(pi)) {
+ if cn.peerChoking && !cn.peerAllowedFast.Contains(pi) {
// This could occur if we made a request with the fast extension, and then got choked and
// haven't had the request rejected yet.
if !cn.requestState.Requests.Contains(r) {
cn.validReceiveChunks = make(map[RequestIndex]int)
}
cn.validReceiveChunks[r]++
- cn.t.pendingRequests[r] = cn
- cn.t.lastRequested[r] = time.Now()
+ cn.t.requestState[r] = requestState{
+ peer: cn,
+ when: time.Now(),
+ }
cn.updateExpectingChunks()
ppReq := cn.t.requestIndexToRequest(r)
for _, f := range cn.callbacks.SentRequest {
panic("request already cancelled")
}
}
+ me.decPeakRequests()
if me.isLowOnRequests() {
me.updateRequests("Peer.cancel")
}
}
func (cn *PeerConn) fillWriteBuffer() {
- if !cn.maybeUpdateActualRequestState() {
- return
- }
+ if cn.messageWriter.writeBuffer.Len() > writeBufferLowWaterLen {
+ // Fully committing to our max requests requires sufficient space (see
+ // maxLocalToRemoteRequests). Flush what we have instead. We also prefer to always make
+ // requests rather than do PEX or upload, so we short-circuit before handling those. Any
+ // update request reason will not be cleared, so we'll come right back here when there's
+ // space. We can't do this in maybeUpdateActualRequestState because it's a method on Peer
+ // and has no knowledge of write buffers.
+ return
+ }
+ cn.maybeUpdateActualRequestState()
if cn.pex.IsEnabled() {
if flow := cn.pex.Share(cn.write); !flow {
return
if cn.needRequestUpdate != "" {
return
}
+ if reason != peerUpdateRequestsTimerReason && !cn.isLowOnRequests() {
+ return
+ }
cn.needRequestUpdate = reason
cn.handleUpdateRequests()
}
// Ignore known excess pieces.
bf = bf[:cn.t.numPieces()]
}
- pp := cn.newPeerPieces()
+ bm := boolSliceToBitmap(bf)
+ if cn.t.haveInfo() && pieceIndex(bm.GetCardinality()) == cn.t.numPieces() {
+ cn.onPeerHasAllPieces()
+ return nil
+ }
+ if !bm.IsEmpty() {
+ cn.raisePeerMinPieces(pieceIndex(bm.Maximum()) + 1)
+ }
+ shouldUpdateRequests := false
+ if cn.peerSentHaveAll {
+ if !cn.t.deleteConnWithAllPieces(&cn.Peer) {
+ panic(cn)
+ }
+ cn.peerSentHaveAll = false
+ if !cn._peerPieces.IsEmpty() {
+ panic("if peer has all, we expect no individual peer pieces to be set")
+ }
+ } else {
+ bm.Xor(&cn._peerPieces)
+ }
cn.peerSentHaveAll = false
- for i, have := range bf {
- if have {
- cn.raisePeerMinPieces(pieceIndex(i) + 1)
- if !pp.Contains(bitmap.BitIndex(i)) {
- cn.t.incPieceAvailability(i)
- }
+ // bm is now 'on' for pieces that are changing
+ bm.Iterate(func(x uint32) bool {
+ pi := pieceIndex(x)
+ if cn._peerPieces.Contains(x) {
+ // Then we must be losing this piece
+ cn.t.decPieceAvailability(pi)
} else {
- if pp.Contains(bitmap.BitIndex(i)) {
- cn.t.decPieceAvailability(i)
- }
- }
- if have {
- cn._peerPieces.Add(uint32(i))
- if cn.t.wantPieceIndex(i) {
- cn.updateRequests("bitfield")
+ if !shouldUpdateRequests && cn.t.wantPieceIndex(pieceIndex(x)) {
+ shouldUpdateRequests = true
}
- } else {
- cn._peerPieces.Remove(uint32(i))
+ // We must be gaining this piece
+ cn.t.incPieceAvailability(pieceIndex(x))
}
+ return true
+ })
+ // Apply the changes. If we had everything previously, this should be empty, so xor is the same
+ // as or.
+ cn._peerPieces.Xor(&bm)
+ if shouldUpdateRequests {
+ cn.updateRequests("bitfield")
}
+ // We didn't guard this before, and I see no reason to start now.
cn.peerPiecesChanged()
return nil
}
func (cn *PeerConn) onPeerHasAllPieces() {
t := cn.t
if t.haveInfo() {
- npp, pc := cn.newPeerPieces(), t.numPieces()
- for i := 0; i < pc; i += 1 {
- if !npp.Contains(bitmap.BitIndex(i)) {
- t.incPieceAvailability(i)
- }
- }
+ cn._peerPieces.Iterate(func(x uint32) bool {
+ t.decPieceAvailability(pieceIndex(x))
+ return true
+ })
}
+ t.addConnWithAllPieces(&cn.Peer)
cn.peerSentHaveAll = true
cn._peerPieces.Clear()
if !cn.t._pendingPieces.IsEmpty() {
}
func (cn *PeerConn) peerSentHaveNone() error {
- cn.t.decPeerPieceAvailability(&cn.Peer)
+ if cn.peerSentHaveAll {
+ cn.t.decPeerPieceAvailability(&cn.Peer)
+ }
cn._peerPieces.Clear()
cn.peerSentHaveAll = false
cn.peerPiecesChanged()
value := &peerRequestState{}
c.peerRequests[r] = value
go c.peerRequestDataReader(r, value)
- // c.tickleWriter()
return nil
}
if b == nil {
panic("data must be non-nil to trigger send")
}
+ torrent.Add("peer request data read successes", 1)
prs.data = b
c.tickleWriter()
}
// If this is maintained correctly, we might be able to support optional synchronous reading for
// chunk sending, the way it used to work.
func (c *PeerConn) peerRequestDataReadFailed(err error, r Request) {
- c.logger.WithDefaultLevel(log.Warning).Printf("error reading chunk for peer Request %v: %v", r, err)
+ torrent.Add("peer request data read failures", 1)
+ logLevel := log.Warning
+ if c.t.hasStorageCap() {
+ // It's expected that pieces might drop. See
+ // https://github.com/anacrolix/torrent/issues/702#issuecomment-1000953313.
+ logLevel = log.Debug
+ }
+ c.logger.WithDefaultLevel(logLevel).Printf("error reading chunk for peer Request %v: %v", r, err)
if c.t.closed.IsSet() {
return
}
// here.
c.t.updatePieceCompletion(i)
}
- // If we failed to send a chunk, choke the peer to ensure they flush all their requests. We've
- // probably dropped a piece from storage, but there's no way to communicate this to the peer. If
- // they ask for it again, we'll kick them to allow us to send them an updated bitfield on the
- // next connect. TODO: Support rejecting here too.
- if c.choking {
- c.logger.WithDefaultLevel(log.Warning).Printf("already choking peer, requests might not be rejected correctly")
+ // We've probably dropped a piece from storage, but there's no way to communicate this to the
+ // peer. If they ask for it again, we kick them, which lets us send them updated piece states on
+ // reconnect. TODO: Instead, we could just try to update them with a Bitfield or HaveNone, and
+ // if they kick us for breaking protocol, we will be compliant again on reconnect (at least
+ // initially).
+ if c.fastEnabled() {
+ c.reject(r)
+ } else {
+ if c.choking {
+ // If fast isn't enabled, I think we would have wiped all peer requests when we last
+ // choked, and requests while we're choking would be ignored. It's possible that a peer
+ // request data read completed concurrently with the request being deleted elsewhere.
+ c.logger.WithDefaultLevel(log.Warning).Printf("already choking peer, requests might not be rejected correctly")
+ }
+ // Choking a non-fast peer should cause them to flush all their requests.
+ c.choke(c.write)
}
- c.choke(c.write)
}
func readPeerRequestData(r Request, c *PeerConn) ([]byte, error) {
}
func (c *PeerConn) logProtocolBehaviour(level log.Level, format string, arg ...interface{}) {
- c.logger.WithLevel(level).WithContextText(fmt.Sprintf(
+ c.logger.WithContextText(fmt.Sprintf(
"peer id %q, ext v %q", c.PeerID, c.PeerClientName.Load(),
- )).SkipCallers(1).Printf(format, arg...)
+ )).SkipCallers(1).Levelf(level, format, arg...)
}
// Processes incoming BitTorrent wire-protocol messages. The client lock is held upon entry and
decoder := pp.Decoder{
R: bufio.NewReaderSize(c.r, 1<<17),
- MaxLength: 256 * 1024,
+ MaxLength: 4 * pp.Integer(max(int64(t.chunkSize), defaultChunkSize)),
Pool: &t.chunkPool,
}
for {
break
}
if !c.fastEnabled() {
- if !c.deleteAllRequests().IsEmpty() {
- c.t.iterPeers(func(p *Peer) {
- if p.isLowOnRequests() {
- p.updateRequests("choked by non-fast PeerConn")
- }
- })
- }
+ c.deleteAllRequests("choked by non-fast PeerConn")
} else {
// We don't decrement pending requests here, let's wait for the peer to either
// reject or satisfy the outstanding requests. Additionally, some peers may unchoke
}
c.peerChoking = false
preservedCount := 0
- c.requestState.Requests.Iterate(func(x uint32) bool {
- if !c.peerAllowedFast.Contains(x / c.t.chunksPerRegularPiece()) {
+ c.requestState.Requests.Iterate(func(x RequestIndex) bool {
+ if !c.peerAllowedFast.Contains(c.t.pieceIndexOfRequestIndex(x)) {
preservedCount++
}
return true
if preservedCount != 0 {
// TODO: Yes this is a debug log but I'm not happy with the state of the logging lib
// right now.
- c.logger.WithLevel(log.Debug).Printf(
+ c.logger.Levelf(log.Debug,
"%v requests were preserved while being choked (fast=%v)",
preservedCount,
c.fastEnabled())
+
torrent.Add("requestsPreservedThroughChoking", int64(preservedCount))
}
if !c.t._pendingPieces.IsEmpty() {
})
case pp.Suggest:
torrent.Add("suggests received", 1)
- log.Fmsg("peer suggested piece %d", msg.Index).AddValues(c, msg.Index).SetLevel(log.Debug).Log(c.t.logger)
+ log.Fmsg("peer suggested piece %d", msg.Index).AddValues(c, msg.Index).LogLevel(log.Debug, c.t.logger)
c.updateRequests("suggested")
case pp.HaveAll:
err = c.onPeerSentHaveAll()
case pp.Reject:
req := newRequestFromMessage(&msg)
if !c.remoteRejectedRequest(c.t.requestIndexFromRequest(req)) {
- log.Printf("received invalid reject [request=%v, peer=%v]", req, c)
+ c.logger.Printf("received invalid reject [request=%v, peer=%v]", req, c)
err = fmt.Errorf("received invalid reject [request=%v]", req)
}
case pp.AllowedFast:
torrent.Add("allowed fasts received", 1)
- log.Fmsg("peer allowed fast: %d", msg.Index).AddValues(c).SetLevel(log.Debug).Log(c.t.logger)
+ log.Fmsg("peer allowed fast: %d", msg.Index).AddValues(c).LogLevel(log.Debug, c.t.logger)
c.updateRequests("PeerConn.mainReadLoop allowed fast")
case pp.Extended:
err = c.onReadExtendedMsg(msg.ExtendedID, msg.ExtendedPayload)
// Returns true if it was valid to reject the request.
func (c *Peer) remoteRejectedRequest(r RequestIndex) bool {
- if !c.deleteRequest(r) && !c.requestState.Cancelled.CheckedRemove(r) {
+ if c.deleteRequest(r) {
+ c.decPeakRequests()
+ } else if !c.requestState.Cancelled.CheckedRemove(r) {
return false
}
if c.isLowOnRequests() {
ppReq := newRequestFromMessage(msg)
req := c.t.requestIndexFromRequest(ppReq)
+ t := c.t
+
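+ // Remember which address sent this block so the sender can be identified if the piece later
+ // fails its hash check.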
+ if c.bannableAddr.Ok() {
+ t.smartBanCache.RecordBlock(c.bannableAddr.Value(), req, msg.Piece)
+ }
if c.peerChoking {
chunksReceived.Add("while choked", 1)
}
c.decExpectedChunkReceive(req)
- if c.peerChoking && c.peerAllowedFast.Contains(bitmap.BitIndex(ppReq.Index)) {
+ if c.peerChoking && c.peerAllowedFast.Contains(pieceIndex(ppReq.Index)) {
chunksReceived.Add("due to allowed fast", 1)
}
}
}
- t := c.t
cl := t.cl
// Do we actually want this chunk?
piece.unpendChunkIndex(chunkIndexFromChunkSpec(ppReq.ChunkSpec, t.chunkSize))
// Cancel pending requests for this chunk from *other* peers.
- if p := t.pendingRequests[req]; p != nil {
+ if p := t.requestingPeer(req); p != nil {
if p == c {
panic("should not be pending request from conn that just received it")
}
cn.t.dropConnection(cn)
}
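+// Bans the connection's remote IP with the client.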
+func (cn *PeerConn) ban() {
+ cn.t.cl.banPeerIP(cn.remoteIp())
+}
+
func (cn *Peer) netGoodPiecesDirtied() int64 {
return cn._stats.PiecesDirtiedGood.Int64() - cn._stats.PiecesDirtiedBad.Int64()
}
if c.t.requestingPeer(r) != c {
panic("only one peer should have a given request at a time")
}
- delete(c.t.pendingRequests, r)
- delete(c.t.lastRequested, r)
+ delete(c.t.requestState, r)
// c.t.iterPeers(func(p *Peer) {
// if p.isLowOnRequests() {
// p.updateRequests("Peer.deleteRequest")
return true
}
-func (c *Peer) deleteAllRequests() (deleted *roaring.Bitmap) {
- deleted = c.requestState.Requests.Clone()
- deleted.Iterate(func(x uint32) bool {
+func (c *Peer) deleteAllRequests(reason string) {
+ if c.requestState.Requests.IsEmpty() {
+ return
+ }
+ c.requestState.Requests.IterateSnapshot(func(x RequestIndex) bool {
if !c.deleteRequest(x) {
panic("request should exist")
}
return true
})
c.assertNoRequests()
+ c.t.iterPeers(func(p *Peer) {
+ if p.isLowOnRequests() {
+ p.updateRequests(reason)
+ }
+ })
return
}
}
}
-func (c *Peer) cancelAllRequests() (cancelled *roaring.Bitmap) {
- cancelled = c.requestState.Requests.Clone()
- cancelled.Iterate(func(x uint32) bool {
+func (c *Peer) cancelAllRequests() {
+ c.requestState.Requests.IterateSnapshot(func(x RequestIndex) bool {
c.cancel(x)
return true
})
return pc, ok
}
-func (pc *PeerConn) isLowOnRequests() bool {
- return pc.requestState.Requests.IsEmpty() && pc.requestState.Cancelled.IsEmpty()
-}
-
func (p *Peer) uncancelledRequests() uint64 {
return p.requestState.Requests.GetCardinality()
}