"sort"
"strconv"
"strings"
+ "sync/atomic"
"time"
+ "github.com/RoaringBitmap/roaring"
"github.com/anacrolix/log"
"github.com/anacrolix/missinggo/iter"
"github.com/anacrolix/missinggo/v2/bitmap"
- "github.com/anacrolix/missinggo/v2/prioritybitmap"
"github.com/anacrolix/multiless"
"github.com/anacrolix/chansync"
// indexable with the memory space available.
type (
maxRequests = int
- requestState = request_strategy.PeerNextRequestState
+ requestState = request_strategy.PeerRequestState
)
type Peer struct {
lastChunkSent time.Time
// Stuff controlled by the local peer.
- nextRequestState requestState
- actualRequestState requestState
+ needRequestUpdate string
+ requestState requestState
+ updateRequestsTimer *time.Timer
+ lastRequestUpdate time.Time
+ peakRequests maxRequests
lastBecameInterested time.Time
priorInterest time.Duration
choking bool
piecesReceivedSinceLastRequestUpdate maxRequests
maxPiecesReceivedBetweenRequestUpdates maxRequests
- // Chunks that we might reasonably expect to receive from the peer. Due to
- // latency, buffering, and implementation differences, we may receive
- // chunks that are no longer in the set of requests actually want.
+ // Chunks that we might reasonably expect to receive from the peer. Due to latency, buffering,
+ // and implementation differences, we may receive chunks that are no longer in the set of
+ // requests we actually want. This could use a roaring.BSI if the memory use becomes noticeable.
validReceiveChunks map[RequestIndex]int
// Indexed by metadata piece, set to true if posted and pending a
// response.
peerRequests map[Request]*peerRequestState
PeerPrefersEncryption bool // as indicated by 'e' field in extension handshake
PeerListenPort int
- // The pieces the peer has claimed to have.
- _peerPieces bitmap.Bitmap
- // The peer has everything. This can occur due to a special message, when
- // we may not even know the number of pieces in the torrent yet.
- peerSentHaveAll bool
// The highest possible number of pieces the torrent could have based on
// communication with the peer. Generally only useful until we have the
// torrent info.
peerMinPieces pieceIndex
// Pieces we've accepted chunks for from the peer.
peerTouchedPieces map[pieceIndex]struct{}
- peerAllowedFast bitmap.Bitmap
+ peerAllowedFast roaring.Bitmap
PeerMaxRequests maxRequests // Maximum pending requests the peer allows.
PeerExtensionIDs map[pp.ExtensionName]pp.ExtensionNumber
- PeerClientName string
-
- pieceInclination []int
- _pieceRequestOrder prioritybitmap.PriorityBitmap
+ PeerClientName atomic.Value
logger log.Logger
}
PeerID PeerID
PeerExtensionBytes pp.PeerExtensionBits
- // The actual Conn, used for closing, and setting socket options.
+ // The actual Conn, used for closing, and setting socket options. Do not use methods on this
+ // while holding any mutexes.
conn net.Conn
// The Reader and Writer for this Conn, with hooks installed for stats,
// limiting, deadlines etc.
uploadTimer *time.Timer
pex pexConnState
+
+ // The pieces the peer has claimed to have.
+ _peerPieces roaring.Bitmap
+ // The peer has everything. This can occur due to a special message, when
+ // we may not even know the number of pieces in the torrent yet.
+ peerSentHaveAll bool
}
func (cn *PeerConn) connStatusString() string {
}
func (cn *Peer) expectingChunks() bool {
- if cn.actualRequestState.Requests.IsEmpty() {
+ if cn.requestState.Requests.IsEmpty() {
return false
}
- if !cn.actualRequestState.Interested {
+ if !cn.requestState.Interested {
return false
}
- if cn.peerAllowedFast.IterTyped(func(i int) bool {
- return roaringBitmapRangeCardinality(
- &cn.actualRequestState.Requests,
- cn.t.pieceRequestIndexOffset(i),
- cn.t.pieceRequestIndexOffset(i+1),
- ) == 0
- }) {
+ if !cn.peerChoking {
return true
}
- return !cn.peerChoking
+ haveAllowedFastRequests := false
+ cn.peerAllowedFast.Iterate(func(i uint32) bool {
+ haveAllowedFastRequests = roaringBitmapRangeCardinality(
+ &cn.requestState.Requests,
+ cn.t.pieceRequestIndexOffset(pieceIndex(i)),
+ cn.t.pieceRequestIndexOffset(pieceIndex(i+1)),
+ ) == 0
+ return !haveAllowedFastRequests
+ })
+ return haveAllowedFastRequests
}
func (cn *Peer) remoteChokingPiece(piece pieceIndex) bool {
func (cn *Peer) cumInterest() time.Duration {
ret := cn.priorInterest
- if cn.actualRequestState.Interested {
+ if cn.requestState.Interested {
ret += time.Since(cn.lastBecameInterested)
}
return ret
}
-func (cn *Peer) peerHasAllPieces() (all bool, known bool) {
+func (cn *PeerConn) peerHasAllPieces() (all, known bool) {
if cn.peerSentHaveAll {
return true, true
}
if !cn.t.haveInfo() {
return false, false
}
- return bitmap.Flip(cn._peerPieces, 0, bitmap.BitRange(cn.t.numPieces())).IsEmpty(), true
+ return cn._peerPieces.GetCardinality() == uint64(cn.t.numPieces()), true
}
-func (cn *PeerConn) locker() *lockWithDeferreds {
+func (cn *Peer) locker() *lockWithDeferreds {
return cn.t.cl.locker()
}
}
func (cn *Peer) completedString() string {
- have := pieceIndex(cn._peerPieces.Len())
- if cn.peerSentHaveAll {
+ have := pieceIndex(cn.peerPieces().GetCardinality())
+ if all, _ := cn.peerHasAllPieces(); all {
have = cn.bestPeerNumPieces()
}
return fmt.Sprintf("%d/%d", have, cn.bestPeerNumPieces())
cn.peerPiecesChanged()
}
+func (cn *PeerConn) peerPieces() *roaring.Bitmap {
+ return &cn._peerPieces
+}
+
func eventAgeString(t time.Time) string {
if t.IsZero() {
return "never"
c := func(b byte) {
ret += string([]byte{b})
}
- if cn.actualRequestState.Interested {
+ if cn.requestState.Interested {
c('i')
}
if cn.choking {
func (cn *Peer) numRequestsByPiece() (ret map[pieceIndex]int) {
ret = make(map[pieceIndex]int)
- cn.actualRequestState.Requests.Iterate(func(x uint32) bool {
+ cn.requestState.Requests.Iterate(func(x uint32) bool {
ret[pieceIndex(x/cn.t.chunksPerRegularPiece())]++
return true
})
cn.totalExpectingTime(),
)
fmt.Fprintf(w,
- " %s completed, %d pieces touched, good chunks: %v/%v-%v reqq: %d/(%d/%d)-%d/%d, flags: %s, dr: %.1f KiB/s\n",
+ " %s completed, %d pieces touched, good chunks: %v/%v:%v reqq: %d+%v/(%d/%d):%d/%d, flags: %s, dr: %.1f KiB/s\n",
cn.completedString(),
len(cn.peerTouchedPieces),
&cn._stats.ChunksReadUseful,
&cn._stats.ChunksRead,
&cn._stats.ChunksWritten,
- cn.actualRequestState.Requests.GetCardinality(),
+ cn.requestState.Requests.GetCardinality(),
+ cn.requestState.Cancelled.GetCardinality(),
cn.nominalMaxRequests(),
cn.PeerMaxRequests,
len(cn.peerRequests),
if !p.closed.Set() {
return
}
- p.discardPieceInclination()
- p._pieceRequestOrder.Clear()
+ if p.updateRequestsTimer != nil {
+ p.updateRequestsTimer.Stop()
+ }
p.peerImpl.onClose()
if p.t != nil {
p.t.decPeerPieceAvailability(p)
}
cn.tickleWriter()
if cn.conn != nil {
- cn.conn.Close()
+ go cn.conn.Close()
}
if cb := cn.callbacks.PeerConnClosed; cb != nil {
cb(cn)
}
}
+// Peer definitely has a piece, for purposes of requesting. So it's not sufficient that we think
+// they do (known=true).
func (cn *Peer) peerHasPiece(piece pieceIndex) bool {
- return cn.peerSentHaveAll || cn._peerPieces.Contains(bitmap.BitIndex(piece))
+ if all, known := cn.peerHasAllPieces(); all && known {
+ return true
+ }
+ return cn.peerPieces().ContainsInt(piece)
}
// 64KiB, but temporarily less to work around an issue with WebRTC. TODO: Update when
// https://github.com/pion/datachannel/issues/59 is fixed.
-const writeBufferHighWaterLen = 1 << 15
+const (
+ writeBufferHighWaterLen = 1 << 15
+ writeBufferLowWaterLen = writeBufferHighWaterLen / 2
+)
// Writes a message into the write buffer. Returns whether it's okay to keep writing. Writing is
// done asynchronously, so it may be that we're not able to honour backpressure from this method.
return index < len(cn.metadataRequests) && cn.metadataRequests[index]
}
+var (
+ interestedMsgLen = len(pp.Message{Type: pp.Interested}.MustMarshalBinary())
+ requestMsgLen = len(pp.Message{Type: pp.Request}.MustMarshalBinary())
+ // This is the maximum request count that could fit in the write buffer if it's at or below the
+ // low water mark when we run maybeUpdateActualRequestState.
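+ // For example, with a 5-byte interested message and 17-byte request messages on the wire,
+ // this works out to roughly (32768-16384-5)/17 ≈ 963 requests.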
+ maxLocalToRemoteRequests = (writeBufferHighWaterLen - writeBufferLowWaterLen - interestedMsgLen) / requestMsgLen
+)
+
// The actual value to use as the maximum outbound requests.
-func (cn *Peer) nominalMaxRequests() (ret maxRequests) {
- return maxRequests(clamp(1, int64(cn.PeerMaxRequests), 128))
+func (cn *Peer) nominalMaxRequests() maxRequests {
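+ // Clamp to at least one request, and to no more than the peer's advertised limit, twice our
+ // recent peak, and what would fit in the write buffer.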
+ return maxRequests(maxInt(1, minInt(cn.PeerMaxRequests, cn.peakRequests*2, maxLocalToRemoteRequests)))
}
func (cn *Peer) totalExpectingTime() (ret time.Duration) {
ret += time.Since(cn.lastStartedExpectingToReceiveChunks)
}
return
-
}
func (cn *PeerConn) onPeerSentCancel(r Request) {
more = msg(pp.Message{
Type: pp.Choke,
})
- if cn.fastEnabled() {
- for r := range cn.peerRequests {
- // TODO: Don't reject pieces in allowed fast set.
- cn.reject(r)
- }
- } else {
+ if !cn.fastEnabled() {
cn.peerRequests = nil
}
return
}
func (cn *Peer) setInterested(interested bool) bool {
- if cn.actualRequestState.Interested == interested {
+ if cn.requestState.Interested == interested {
return true
}
- cn.actualRequestState.Interested = interested
+ cn.requestState.Interested = interested
if interested {
cn.lastBecameInterested = time.Now()
} else if !cn.lastBecameInterested.IsZero() {
// are okay.
type messageWriter func(pp.Message) bool
+// This function seems to only be used by Peer.request. It's all logic checks, so maybe we can no-op it
+// when we want to go fast.
func (cn *Peer) shouldRequest(r RequestIndex) error {
pi := pieceIndex(r / cn.t.chunksPerRegularPiece())
+ if cn.requestState.Cancelled.Contains(r) {
+ return errors.New("request is cancelled and waiting acknowledgement")
+ }
if !cn.peerHasPiece(pi) {
return errors.New("requesting piece peer doesn't have")
}
panic("piece is queued for hash")
}
if cn.peerChoking && !cn.peerAllowedFast.Contains(bitmap.BitIndex(pi)) {
- panic("peer choking and piece not allowed fast")
+ // This could occur if we made a request with the fast extension, and then got choked and
+ // haven't had the request rejected yet.
+ if !cn.requestState.Requests.Contains(r) {
+ panic("peer choking and piece not allowed fast")
+ }
}
return nil
}
+func (cn *Peer) mustRequest(r RequestIndex) bool {
+ more, err := cn.request(r)
+ if err != nil {
+ panic(err)
+ }
+ return more
+}
+
func (cn *Peer) request(r RequestIndex) (more bool, err error) {
if err := cn.shouldRequest(r); err != nil {
panic(err)
}
- if cn.actualRequestState.Requests.Contains(r) {
+ if cn.requestState.Requests.Contains(r) {
return true, nil
}
- if maxRequests(cn.actualRequestState.Requests.GetCardinality()) >= cn.nominalMaxRequests() {
+ if maxRequests(cn.requestState.Requests.GetCardinality()) >= cn.nominalMaxRequests() {
return true, errors.New("too many outstanding requests")
}
- cn.actualRequestState.Requests.Add(r)
+ cn.requestState.Requests.Add(r)
if cn.validReceiveChunks == nil {
cn.validReceiveChunks = make(map[RequestIndex]int)
}
cn.validReceiveChunks[r]++
- cn.t.pendingRequests[r]++
+ cn.t.pendingRequests[r] = cn
+ cn.t.lastRequested[r] = time.Now()
cn.updateExpectingChunks()
ppReq := cn.t.requestIndexToRequest(r)
for _, f := range cn.callbacks.SentRequest {
})
}
-func (me *Peer) cancel(r RequestIndex) bool {
- if me.deleteRequest(r) {
- return me.peerImpl._cancel(me.t.requestIndexToRequest(r))
+func (me *Peer) cancel(r RequestIndex) {
+ if !me.deleteRequest(r) {
+ panic("request not existing should have been guarded")
+ }
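+ // If we expect the peer to acknowledge the cancel (see PeerConn._cancel), track the request
+ // as cancelled until it's rejected or satisfied.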
+ if me._cancel(r) {
+ if !me.requestState.Cancelled.CheckedAdd(r) {
+ panic("request already cancelled")
+ }
+ }
+ me.decPeakRequests()
+ if me.isLowOnRequests() {
+ me.updateRequests("Peer.cancel")
}
- return true
}
-func (me *PeerConn) _cancel(r Request) bool {
- return me.write(makeCancelMessage(r))
+func (me *PeerConn) _cancel(r RequestIndex) bool {
+ me.write(makeCancelMessage(me.t.requestIndexToRequest(r)))
+ // Transmission does not send rejects for received cancels. See
+ // https://github.com/transmission/transmission/pull/2275.
+ return me.fastEnabled() && !me.remoteIsTransmission()
}
func (cn *PeerConn) fillWriteBuffer() {
- if !cn.applyNextRequestState() {
- return
- }
+ if cn.messageWriter.writeBuffer.Len() > writeBufferLowWaterLen {
+ // Fully committing to our max requests requires sufficient space (see
+ // maxLocalToRemoteRequests). Flush what we have instead. We also prefer always to make
+ // requests than to do PEX or upload, so we short-circuit before handling those. Any update
+ // request reason will not be cleared, so we'll come right back here when there's space. We
+ // can't do this in maybeUpdateActualRequestState because it's a method on Peer and has no
+ // knowledge of write buffers.
+ return
+ }
+ cn.maybeUpdateActualRequestState()
if cn.pex.IsEnabled() {
if flow := cn.pex.Share(cn.write); !flow {
return
cn.sentHaves = bitmap.Bitmap{cn.t._completedPieces.Clone()}
}
-func (cn *PeerConn) updateRequests() {
- if peerRequesting {
- if cn.actualRequestState.Requests.GetCardinality() != 0 {
- return
- }
- cn.tickleWriter()
+// Sets a reason to update requests, and if there wasn't already one, handles it.
+func (cn *Peer) updateRequests(reason string) {
+ if cn.needRequestUpdate != "" {
+ return
+ }
+ if reason != peerUpdateRequestsTimerReason && !cn.isLowOnRequests() {
return
}
- cn.t.cl.tickleRequester()
+ cn.needRequestUpdate = reason
+ cn.handleUpdateRequests()
+}
+
+func (cn *PeerConn) handleUpdateRequests() {
+ // The writer determines the request state as needed when it can write.
+ cn.tickleWriter()
}
// Emits the indices in the Bitmaps bms in order, never repeating any index.
}
}
-// check callers updaterequests
-func (cn *Peer) stopRequestingPiece(piece pieceIndex) bool {
- return cn._pieceRequestOrder.Remove(piece)
-}
-
-// This is distinct from Torrent piece priority, which is the user's
-// preference. Connection piece priority is specific to a connection and is
-// used to pseudorandomly avoid connections always requesting the same pieces
-// and thus wasting effort.
-func (cn *Peer) updatePiecePriority(piece pieceIndex) bool {
- tpp := cn.t.piecePriority(piece)
- if !cn.peerHasPiece(piece) {
- tpp = PiecePriorityNone
- }
- if tpp == PiecePriorityNone {
- return cn.stopRequestingPiece(piece)
- }
- prio := cn.getPieceInclination()[piece]
- return cn._pieceRequestOrder.Set(piece, prio)
-}
-
-func (cn *Peer) getPieceInclination() []int {
- if cn.pieceInclination == nil {
- cn.pieceInclination = cn.t.getConnPieceInclination()
- }
- return cn.pieceInclination
-}
-
-func (cn *Peer) discardPieceInclination() {
- if cn.pieceInclination == nil {
- return
- }
- cn.t.putPieceInclination(cn.pieceInclination)
- cn.pieceInclination = nil
-}
-
func (cn *Peer) peerPiecesChanged() {
- if cn.t.haveInfo() {
- prioritiesChanged := false
- for i := pieceIndex(0); i < cn.t.numPieces(); i++ {
- if cn.updatePiecePriority(i) {
- prioritiesChanged = true
- }
- }
- if prioritiesChanged {
- cn.updateRequests()
- }
- }
cn.t.maybeDropMutuallyCompletePeer(cn)
}
if !cn.peerHasPiece(piece) {
cn.t.incPieceAvailability(piece)
}
- cn._peerPieces.Set(bitmap.BitIndex(piece), true)
- cn.t.maybeDropMutuallyCompletePeer(&cn.Peer)
- if cn.updatePiecePriority(piece) {
- cn.updateRequests()
+ cn._peerPieces.Add(uint32(piece))
+ if cn.t.wantPieceIndex(piece) {
+ cn.updateRequests("have")
}
+ cn.peerPiecesChanged()
return nil
}
// Ignore known excess pieces.
bf = bf[:cn.t.numPieces()]
}
- pp := cn.newPeerPieces()
+ bm := boolSliceToBitmap(bf)
+ if cn.t.haveInfo() && pieceIndex(bm.GetCardinality()) == cn.t.numPieces() {
+ cn.onPeerHasAllPieces()
+ return nil
+ }
+ if !bm.IsEmpty() {
+ cn.raisePeerMinPieces(pieceIndex(bm.Maximum()) + 1)
+ }
+ shouldUpdateRequests := false
+ if cn.peerSentHaveAll {
+ if !cn.t.deleteConnWithAllPieces(&cn.Peer) {
+ panic(cn)
+ }
+ cn.peerSentHaveAll = false
+ if !cn._peerPieces.IsEmpty() {
+ panic("if peer has all, we expect no individual peer pieces to be set")
+ }
+ } else {
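+ // We already track this peer's individual claims, so XOR leaves bm set only for pieces whose
+ // claimed state changes.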
+ bm.Xor(&cn._peerPieces)
+ }
cn.peerSentHaveAll = false
- for i, have := range bf {
- if have {
- cn.raisePeerMinPieces(pieceIndex(i) + 1)
- if !pp.Contains(bitmap.BitIndex(i)) {
- cn.t.incPieceAvailability(i)
- }
+ // bm is now 'on' for pieces that are changing
+ bm.Iterate(func(x uint32) bool {
+ pi := pieceIndex(x)
+ if cn._peerPieces.Contains(x) {
+ // Then we must be losing this piece
+ cn.t.decPieceAvailability(pi)
} else {
- if pp.Contains(bitmap.BitIndex(i)) {
- cn.t.decPieceAvailability(i)
+ if !shouldUpdateRequests && cn.t.wantPieceIndex(pieceIndex(x)) {
+ shouldUpdateRequests = true
}
+ // We must be gaining this piece
+ cn.t.incPieceAvailability(pieceIndex(x))
}
- cn._peerPieces.Set(bitmap.BitIndex(i), have)
+ return true
+ })
+ // Apply the changes. If we had everything previously, this should be empty, so xor is the same
+ // as or.
+ cn._peerPieces.Xor(&bm)
+ if shouldUpdateRequests {
+ cn.updateRequests("bitfield")
}
+ // We didn't guard this before, and I see no reason to do it now.
cn.peerPiecesChanged()
return nil
}
-func (cn *Peer) onPeerHasAllPieces() {
+func (cn *PeerConn) onPeerHasAllPieces() {
t := cn.t
if t.haveInfo() {
- npp, pc := cn.newPeerPieces(), t.numPieces()
- for i := 0; i < pc; i += 1 {
- if !npp.Contains(bitmap.BitIndex(i)) {
- t.incPieceAvailability(i)
- }
- }
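+ // Drop this peer's per-piece availability contributions; the peer is about to be counted via
+ // addConnWithAllPieces instead.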
+ cn._peerPieces.Iterate(func(x uint32) bool {
+ t.decPieceAvailability(pieceIndex(x))
+ return true
+ })
}
+ t.addConnWithAllPieces(&cn.Peer)
cn.peerSentHaveAll = true
cn._peerPieces.Clear()
+ if !cn.t._pendingPieces.IsEmpty() {
+ cn.updateRequests("Peer.onPeerHasAllPieces")
+ }
cn.peerPiecesChanged()
}
}
func (cn *PeerConn) peerSentHaveNone() error {
- cn.t.decPeerPieceAvailability(&cn.Peer)
+ if cn.peerSentHaveAll {
+ cn.t.decPeerPieceAvailability(&cn.Peer)
+ }
cn._peerPieces.Clear()
cn.peerSentHaveAll = false
cn.peerPiecesChanged()
cn.allStats(add(n, func(cs *ConnStats) *Count { return &cs.BytesWritten }))
}
-func (cn *PeerConn) readBytes(n int64) {
+func (cn *Peer) readBytes(n int64) {
cn.allStats(add(n, func(cs *ConnStats) *Count { return &cs.BytesRead }))
}
value := &peerRequestState{}
c.peerRequests[r] = value
go c.peerRequestDataReader(r, value)
- //c.tickleWriter()
return nil
}
if b == nil {
panic("data must be non-nil to trigger send")
}
+ torrent.Add("peer request data read successes", 1)
prs.data = b
c.tickleWriter()
}
// If this is maintained correctly, we might be able to support optional synchronous reading for
// chunk sending, the way it used to work.
func (c *PeerConn) peerRequestDataReadFailed(err error, r Request) {
- c.logger.WithDefaultLevel(log.Warning).Printf("error reading chunk for peer Request %v: %v", r, err)
+ torrent.Add("peer request data read failures", 1)
+ logLevel := log.Warning
+ if c.t.hasStorageCap() {
+ // It's expected that pieces might drop. See
+ // https://github.com/anacrolix/torrent/issues/702#issuecomment-1000953313.
+ logLevel = log.Debug
+ }
+ c.logger.WithDefaultLevel(logLevel).Printf("error reading chunk for peer Request %v: %v", r, err)
+ if c.t.closed.IsSet() {
+ return
+ }
i := pieceIndex(r.Index)
if c.t.pieceComplete(i) {
// There used to be more code here that just duplicated the following break. Piece
// here.
c.t.updatePieceCompletion(i)
}
- // If we failed to send a chunk, choke the peer to ensure they flush all their requests. We've
- // probably dropped a piece from storage, but there's no way to communicate this to the peer. If
- // they ask for it again, we'll kick them to allow us to send them an updated bitfield on the
- // next connect. TODO: Support rejecting here too.
- if c.choking {
- c.logger.WithDefaultLevel(log.Warning).Printf("already choking peer, requests might not be rejected correctly")
+ // We've probably dropped a piece from storage, but there's no way to communicate this to the
+ // peer. If they ask for it again, we kick them, allowing us to send them updated piece states if
+ // we reconnect. TODO: Instead, we could just try to update them with Bitfield or HaveNone and
+ // if they kick us for breaking protocol, on reconnect we will be compliant again (at least
+ // initially).
+ if c.fastEnabled() {
+ c.reject(r)
+ } else {
+ if c.choking {
+ // If fast isn't enabled, I think we would have wiped all peer requests when we last
+ // choked, and requests while we're choking would be ignored. It could be possible that
+ // a peer request data read completed concurrently with it being deleted elsewhere.
+ c.logger.WithDefaultLevel(log.Warning).Printf("already choking peer, requests might not be rejected correctly")
+ }
+ // Choking a non-fast peer should cause them to flush all their requests.
+ c.choke(c.write)
}
- c.choke(c.write)
}
func readPeerRequestData(r Request, c *PeerConn) ([]byte, error) {
}
}
+func (c *PeerConn) logProtocolBehaviour(level log.Level, format string, arg ...interface{}) {
+ c.logger.WithLevel(level).WithContextText(fmt.Sprintf(
+ "peer id %q, ext v %q", c.PeerID, c.PeerClientName.Load(),
+ )).SkipCallers(1).Printf(format, arg...)
+}
+
// Processes incoming BitTorrent wire-protocol messages. The client lock is held upon entry and
// exit. Returning will end the connection.
func (c *PeerConn) mainReadLoop() (err error) {
}
switch msg.Type {
case pp.Choke:
- c.peerChoking = true
+ if c.peerChoking {
+ break
+ }
if !c.fastEnabled() {
- c.deleteAllRequests()
+ if !c.deleteAllRequests().IsEmpty() {
+ c.t.iterPeers(func(p *Peer) {
+ if p.isLowOnRequests() {
+ p.updateRequests("choked by non-fast PeerConn")
+ }
+ })
+ }
+ } else {
+ // We don't decrement pending requests here; we wait for the peer to either
+ // reject or satisfy the outstanding requests. Additionally, some peers may unchoke
+ // us and resume where they left off, and we don't want to have piled on to those
+ // chunks in the meanwhile. I think a peer's ability to abuse this should be
+ // limited: they could let us request a lot of stuff, then choke us and never
+ // reject, but they're only a single peer, and our chunk balancing should smooth
+ // over this abuse.
}
- // We can then reset our interest.
- c.updateRequests()
+ c.peerChoking = true
c.updateExpectingChunks()
case pp.Unchoke:
+ if !c.peerChoking {
+ // Some clients do this for some reason. Transmission doesn't error on this, so we
+ // won't for consistency.
+ c.logProtocolBehaviour(log.Debug, "received unchoke when already unchoked")
+ break
+ }
c.peerChoking = false
- c.tickleWriter()
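+ // Count the outstanding requests for pieces outside the allowed-fast set: these were
+ // preserved through the choke rather than being rejected or dropped.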
+ preservedCount := 0
+ c.requestState.Requests.Iterate(func(x uint32) bool {
+ if !c.peerAllowedFast.Contains(x / c.t.chunksPerRegularPiece()) {
+ preservedCount++
+ }
+ return true
+ })
+ if preservedCount != 0 {
+ // TODO: Yes this is a debug log but I'm not happy with the state of the logging lib
+ // right now.
+ c.logger.WithLevel(log.Debug).Printf(
+ "%v requests were preserved while being choked (fast=%v)",
+ preservedCount,
+ c.fastEnabled())
+ torrent.Add("requestsPreservedThroughChoking", int64(preservedCount))
+ }
+ if !c.t._pendingPieces.IsEmpty() {
+ c.updateRequests("unchoked")
+ }
c.updateExpectingChunks()
case pp.Interested:
c.peerInterested = true
case pp.Suggest:
torrent.Add("suggests received", 1)
log.Fmsg("peer suggested piece %d", msg.Index).AddValues(c, msg.Index).SetLevel(log.Debug).Log(c.t.logger)
- c.updateRequests()
+ c.updateRequests("suggested")
case pp.HaveAll:
err = c.onPeerSentHaveAll()
case pp.HaveNone:
err = c.peerSentHaveNone()
case pp.Reject:
- c.remoteRejectedRequest(c.t.requestIndexFromRequest(newRequestFromMessage(&msg)))
+ req := newRequestFromMessage(&msg)
+ if !c.remoteRejectedRequest(c.t.requestIndexFromRequest(req)) {
+ log.Printf("received invalid reject [request=%v, peer=%v]", req, c)
+ err = fmt.Errorf("received invalid reject [request=%v]", req)
+ }
case pp.AllowedFast:
torrent.Add("allowed fasts received", 1)
log.Fmsg("peer allowed fast: %d", msg.Index).AddValues(c).SetLevel(log.Debug).Log(c.t.logger)
- c.peerAllowedFast.Add(bitmap.BitIndex(msg.Index))
- c.updateRequests()
+ c.updateRequests("PeerConn.mainReadLoop allowed fast")
case pp.Extended:
err = c.onReadExtendedMsg(msg.ExtendedID, msg.ExtendedPayload)
default:
}
}
-func (c *Peer) remoteRejectedRequest(r RequestIndex) {
+// Returns true if it was valid to reject the request.
+func (c *Peer) remoteRejectedRequest(r RequestIndex) bool {
if c.deleteRequest(r) {
- c.decExpectedChunkReceive(r)
+ c.decPeakRequests()
+ } else if !c.requestState.Cancelled.CheckedRemove(r) {
+ return false
}
+ if c.isLowOnRequests() {
+ c.updateRequests("Peer.remoteRejectedRequest")
+ }
+ c.decExpectedChunkReceive(r)
+ return true
}
func (c *Peer) decExpectedChunkReceive(r RequestIndex) {
if cb := c.callbacks.ReadExtendedHandshake; cb != nil {
cb(c, &d)
}
- //c.logger.WithDefaultLevel(log.Debug).Printf("received extended handshake message:\n%s", spew.Sdump(d))
+ // c.logger.WithDefaultLevel(log.Debug).Printf("received extended handshake message:\n%s", spew.Sdump(d))
if d.Reqq != 0 {
c.PeerMaxRequests = d.Reqq
}
- c.PeerClientName = d.V
+ c.PeerClientName.Store(d.V)
if c.PeerExtensionIDs == nil {
c.PeerExtensionIDs = make(map[pp.ExtensionName]pp.ExtensionNumber, len(d.M))
}
c.PeerPrefersEncryption = d.Encryption
for name, id := range d.M {
if _, ok := c.PeerExtensionIDs[name]; !ok {
- peersSupportingExtension.Add(string(name), 1)
+ peersSupportingExtension.Add(
+ // expvar.Var.String must produce valid JSON. "ut_payme\xeet_address" was being
+ // entered here, which caused problems later when unmarshalling.
+ strconv.Quote(string(name)),
+ 1)
}
c.PeerExtensionIDs[name] = id
}
}
c.decExpectedChunkReceive(req)
- if c.peerChoking && c.peerAllowedFast.Get(bitmap.BitIndex(ppReq.Index)) {
+ if c.peerChoking && c.peerAllowedFast.ContainsInt(int(ppReq.Index)) {
chunksReceived.Add("due to allowed fast", 1)
}
// The request needs to be deleted immediately to prevent cancels occurring asynchronously when
// have actually already received the piece, while we have the Client unlocked to write the data
// out.
- deletedRequest := false
+ intended := false
{
- if c.actualRequestState.Requests.Contains(req) {
+ if c.requestState.Requests.Contains(req) {
for _, f := range c.callbacks.ReceivedRequested {
f(PeerMessageEvent{c, msg})
}
}
// Request has been satisfied.
- if c.deleteRequest(req) {
- deletedRequest = true
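+ // "Intended" means we actually asked for this chunk: either the request is still
+ // outstanding, or we cancelled it and the cancel hasn't been acknowledged yet.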
+ if c.deleteRequest(req) || c.requestState.Cancelled.CheckedRemove(req) {
+ intended = true
if !c.peerChoking {
c._chunksReceivedWhileExpecting++
}
+ if c.isLowOnRequests() {
+ c.updateRequests("Peer.receiveChunk deleted request")
+ }
} else {
- chunksReceived.Add("unwanted", 1)
+ chunksReceived.Add("unintended", 1)
}
}
// Do we actually want this chunk?
if t.haveChunk(ppReq) {
- chunksReceived.Add("wasted", 1)
+ // panic(fmt.Sprintf("%+v", ppReq))
+ chunksReceived.Add("redundant", 1)
c.allStats(add(1, func(cs *ConnStats) *Count { return &cs.ChunksReadWasted }))
return nil
}
c.allStats(add(1, func(cs *ConnStats) *Count { return &cs.ChunksReadUseful }))
c.allStats(add(int64(len(msg.Piece)), func(cs *ConnStats) *Count { return &cs.BytesReadUsefulData }))
- if deletedRequest {
+ if intended {
c.piecesReceivedSinceLastRequestUpdate++
- c.updateRequests()
c.allStats(add(int64(len(msg.Piece)), func(cs *ConnStats) *Count { return &cs.BytesReadUsefulIntendedData }))
}
for _, f := range c.t.cl.config.Callbacks.ReceivedUsefulData {
piece.unpendChunkIndex(chunkIndexFromChunkSpec(ppReq.ChunkSpec, t.chunkSize))
// Cancel pending requests for this chunk from *other* peers.
- t.iterPeers(func(p *Peer) {
+ if p := t.pendingRequests[req]; p != nil {
if p == c {
- return
+ panic("should not be pending request from conn that just received it")
}
p.cancel(req)
- })
+ }
err := func() error {
cl.unlock()
if err != nil {
c.logger.WithDefaultLevel(log.Error).Printf("writing received chunk %v: %v", req, err)
t.pendRequest(req)
- //t.updatePieceCompletion(pieceIndex(msg.Index))
+ // Necessary to pass TestReceiveChunkStorageFailureSeederFastExtensionDisabled. I think a
+ // request update runs while we're writing the chunk that just failed. Then we never do a
+ // fresh update after pending the failed request.
+ c.updateRequests("Peer.receiveChunk error writing chunk")
t.onWriteChunkErr(err)
return nil
}
}
func (c *Peer) peerHasWantedPieces() bool {
- return !c._pieceRequestOrder.IsEmpty()
+ if all, _ := c.peerHasAllPieces(); all {
+ return !c.t.haveAllPieces() && !c.t._pendingPieces.IsEmpty()
+ }
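+ // Without the torrent info we can't compare against pending pieces, so any claimed piece is
+ // potentially wanted.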
+ if !c.t.haveInfo() {
+ return !c.peerPieces().IsEmpty()
+ }
+ return c.peerPieces().Intersects(&c.t._pendingPieces)
}
+// Returns true if an outstanding request is removed. Cancelled requests should be handled
+// separately.
func (c *Peer) deleteRequest(r RequestIndex) bool {
- c.nextRequestState.Requests.Remove(r)
- if !c.actualRequestState.Requests.CheckedRemove(r) {
+ if !c.requestState.Requests.CheckedRemove(r) {
return false
}
for _, f := range c.callbacks.DeletedRequest {
f(PeerRequestEvent{c, c.t.requestIndexToRequest(r)})
}
c.updateExpectingChunks()
- pr := c.t.pendingRequests
- pr[r]--
- n := pr[r]
- if n == 0 {
- delete(pr, r)
- }
- if n < 0 {
- panic(n)
- }
+ if c.t.requestingPeer(r) != c {
+ panic("only one peer should have a given request at a time")
+ }
+ delete(c.t.pendingRequests, r)
+ delete(c.t.lastRequested, r)
+ // c.t.iterPeers(func(p *Peer) {
+ // if p.isLowOnRequests() {
+ // p.updateRequests("Peer.deleteRequest")
+ // }
+ // })
return true
}
-func (c *Peer) deleteAllRequests() {
- c.actualRequestState.Requests.Clone().Iterate(func(x uint32) bool {
- c.deleteRequest(x)
+func (c *Peer) deleteAllRequests() (deleted *roaring.Bitmap) {
+ deleted = c.requestState.Requests.Clone()
+ deleted.Iterate(func(x uint32) bool {
+ if !c.deleteRequest(x) {
+ panic("request should exist")
+ }
return true
})
- if !c.actualRequestState.Requests.IsEmpty() {
- panic(c.actualRequestState.Requests.GetCardinality())
+ c.assertNoRequests()
+ return
+}
+
+func (c *Peer) assertNoRequests() {
+ if !c.requestState.Requests.IsEmpty() {
+ panic(c.requestState.Requests.GetCardinality())
}
- c.nextRequestState.Requests.Clear()
- // for c := range c.t.conns {
- // c.tickleWriter()
- // }
+}
+
+func (c *Peer) cancelAllRequests() (cancelled *roaring.Bitmap) {
+ cancelled = c.requestState.Requests.Clone()
+ cancelled.Iterate(func(x uint32) bool {
+ c.cancel(x)
+ return true
+ })
+ c.assertNoRequests()
+ return
}
// This is called when something has changed that should wake the writer, such as putting stuff into
func (c *PeerConn) pexEvent(t pexEventType) pexEvent {
f := c.pexPeerFlags()
addr := c.dialAddr()
- return pexEvent{t, addr, f}
+ return pexEvent{t, addr, f, nil}
}
func (c *PeerConn) String() string {
- return fmt.Sprintf("connection %p", c)
+ return fmt.Sprintf("%T %p [id=%q, exts=%v, v=%q]", c, c, c.PeerID, c.PeerExtensionBytes, c.PeerClientName.Load())
}
func (c *Peer) trust() connectionTrust {
// Returns the pieces the peer could have based on their claims. If we don't know how many pieces
// are in the torrent, it could be a very large range the peer has sent HaveAll.
-func (cn *PeerConn) PeerPieces() bitmap.Bitmap {
+func (cn *PeerConn) PeerPieces() *roaring.Bitmap {
cn.locker().RLock()
defer cn.locker().RUnlock()
return cn.newPeerPieces()
}
// Returns a new Bitmap that includes bits for all pieces the peer could have based on their claims.
-func (cn *Peer) newPeerPieces() bitmap.Bitmap {
- ret := cn._peerPieces.Copy()
- if cn.peerSentHaveAll {
+func (cn *Peer) newPeerPieces() *roaring.Bitmap {
+ // TODO: Can we use copy on write?
+ ret := cn.peerPieces().Clone()
+ if all, _ := cn.peerHasAllPieces(); all {
if cn.t.haveInfo() {
ret.AddRange(0, bitmap.BitRange(cn.t.numPieces()))
} else {
return pc, ok
}
-func (p *PeerConn) onNextRequestStateChanged() {
- p.tickleWriter()
+func (p *Peer) uncancelledRequests() uint64 {
+ return p.requestState.Requests.GetCardinality()
+}
+
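+// Transmission peer ids are Azureus-style, e.g. "-TR3000-...", so check the "-TR" prefix and
+// the dash that closes the version field.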
+func (pc *PeerConn) remoteIsTransmission() bool {
+ return bytes.HasPrefix(pc.PeerID[:], []byte("-TR")) && pc.PeerID[7] == '-'
}