"github.com/anacrolix/dht/v2"
"github.com/anacrolix/log"
"github.com/anacrolix/missinggo"
- "github.com/anacrolix/missinggo/bitmap"
"github.com/anacrolix/missinggo/iter"
- "github.com/anacrolix/missinggo/prioritybitmap"
+ "github.com/anacrolix/missinggo/v2/bitmap"
+ "github.com/anacrolix/missinggo/v2/prioritybitmap"
"github.com/anacrolix/multiless"
"github.com/pkg/errors"
// Maintains the state of a connection with a peer.
type connection struct {
// First to ensure 64-bit alignment for atomics. See #262.
- stats ConnStats
+ _stats ConnStats
t *Torrent
// The actual Conn, used for closing, and setting socket options.
lastStartedExpectingToReceiveChunks time.Time
cumulativeExpectedToReceiveChunks time.Duration
- chunksReceivedWhileExpecting int64
+ _chunksReceivedWhileExpecting int64
Choked bool
requests map[request]struct{}
PeerRequests map[request]struct{}
PeerExtensionBytes pp.PeerExtensionBits
// The pieces the peer has claimed to have.
- peerPieces bitmap.Bitmap
+ _peerPieces bitmap.Bitmap
// The peer has everything. This can occur due to a special message, when
// we may not even know the number of pieces in the torrent yet.
peerSentHaveAll bool
PeerExtensionIDs map[pp.ExtensionName]pp.ExtensionNumber
PeerClientName string
- pieceInclination []int
- pieceRequestOrder prioritybitmap.PriorityBitmap
+ pieceInclination []int
+ _pieceRequestOrder prioritybitmap.PriorityBitmap
writeBuffer *bytes.Buffer
uploadTimer *time.Timer
if !cn.t.haveInfo() {
return false, false
}
- return bitmap.Flip(cn.peerPieces, 0, bitmap.BitIndex(cn.t.numPieces())).IsEmpty(), true
+ return bitmap.Flip(cn._peerPieces, 0, bitmap.BitIndex(cn.t.numPieces())).IsEmpty(), true
}
func (cn *connection) mu() sync.Locker {
}
func (cn *connection) completedString() string {
- have := pieceIndex(cn.peerPieces.Len())
+ have := pieceIndex(cn._peerPieces.Len())
if cn.peerSentHaveAll {
have = cn.bestPeerNumPieces()
}
// invalid, such as by receiving badly sized BITFIELD, or invalid HAVE
// messages.
// Truncates the peer-pieces bitmap at the new piece count, discarding any
// claims beyond it (see note above about badly sized BITFIELDs). Always
// returns nil.
func (cn *connection) setNumPieces(num pieceIndex) error {
-	cn.peerPieces.RemoveRange(bitmap.BitIndex(num), bitmap.ToEnd)
+	cn._peerPieces.RemoveRange(bitmap.BitIndex(num), bitmap.ToEnd)
	cn.peerPiecesChanged()
	return nil
}
// }
// downloadRate is useful-data bytes read per second of cumulative interest.
// Float division: a zero cumInterest yields +Inf/NaN rather than panicking.
func (cn *connection) downloadRate() float64 {
-	return float64(cn.stats.BytesReadUsefulData.Int64()) / cn.cumInterest().Seconds()
+	return float64(cn._stats.BytesReadUsefulData.Int64()) / cn.cumInterest().Seconds()
}
func (cn *connection) WriteStatus(w io.Writer, t *Torrent) {
" %s completed, %d pieces touched, good chunks: %v/%v-%v reqq: (%d,%d,%d]-%d, flags: %s, dr: %.1f KiB/s\n",
cn.completedString(),
len(cn.peerTouchedPieces),
- &cn.stats.ChunksReadUseful,
- &cn.stats.ChunksRead,
- &cn.stats.ChunksWritten,
+ &cn._stats.ChunksReadUseful,
+ &cn._stats.ChunksRead,
+ &cn._stats.ChunksWritten,
cn.requestsLowWater,
cn.numLocalRequests(),
cn.nominalMaxRequests(),
fmt.Fprintf(w, " next pieces: %v%s\n",
iter.ToSlice(iter.Head(10, cn.iterPendingPiecesUntyped)),
func() string {
- if cn.shouldRequestWithoutBias() {
+ if cn == t.fastestConn {
return " (fastest)"
} else {
return ""
}
- }())
+ }(),
+ )
}
func (cn *connection) Close() {
}
cn.tickleWriter()
cn.discardPieceInclination()
- cn.pieceRequestOrder.Clear()
+ cn._pieceRequestOrder.Clear()
if cn.conn != nil {
go cn.conn.Close()
}
}
// PeerHasPiece reports whether the peer claims the given piece, either via an
// explicit have/bitfield (tracked in the peer-pieces bitmap) or a have-all
// message.
func (cn *connection) PeerHasPiece(piece pieceIndex) bool {
-	return cn.peerSentHaveAll || cn.peerPieces.Contains(bitmap.BitIndex(piece))
+	return cn.peerSentHaveAll || cn._peerPieces.Contains(bitmap.BitIndex(piece))
}
// Writes a message into the write buffer.
// The actual value to use as the maximum outbound requests.
func (cn *connection) nominalMaxRequests() (ret int) {
-	if cn.t.requestStrategy == 3 {
-		expectingTime := int64(cn.totalExpectingTime())
-		if expectingTime == 0 {
-			expectingTime = math.MaxInt64
-		} else {
-			expectingTime *= 2
-		}
-		return int(clamp(
-			1,
-			int64(cn.PeerMaxRequests),
-			max(
-				// It makes sense to always pipeline at least one connection, since latency must be
-				// non-zero.
-				2,
-				// Request only as many as we expect to receive in the duplicateRequestTimeout
-				// window. We are trying to avoid having to duplicate requests.
-				cn.chunksReceivedWhileExpecting*int64(cn.t.duplicateRequestTimeout)/expectingTime,
-			),
-		))
-	}
	// Clamp whatever the active request strategy suggests to [1, PeerMaxRequests].
	return int(clamp(
		1,
		int64(cn.PeerMaxRequests),
+		int64(cn.t.requestStrategy.nominalMaxRequests(cn.requestStrategyConnection())),
+	))
+}
+
+// The actual value to use as the maximum outbound requests.
+// Strategy three sizes the pipeline from observed delivery: roughly the
+// number of chunks expected to arrive within one duplicateRequestTimeout.
+func (requestStrategyThree) nominalMaxRequests(cn requestStrategyConnection) (ret int) {
+	expectingTime := int64(cn.totalExpectingTime())
+	if expectingTime == 0 {
+		// No waiting time recorded yet: make the rate term below collapse to
+		// zero so only the max(2, ...) floor applies.
+		expectingTime = math.MaxInt64
+	} else {
+		expectingTime *= 2
+	}
+	return int(clamp(
+		1,
+		int64(cn.peerMaxRequests()),
+		max(
+			// It makes sense to always pipeline at least one connection, since latency must be
+			// non-zero.
+			2,
+			// Request only as many as we expect to receive in the duplicateRequestTimeout
+			// window. We are trying to avoid having to duplicate requests.
+			cn.chunksReceivedWhileExpecting()*int64(cn.torrent().duplicateRequestTimeout())/expectingTime,
+		),
+	))
+}
+
+// Shared by strategies one and two: allow more outstanding requests the more
+// of the peer's delivered chunks were useful (useful minus wasted reads),
+// never dropping below 64.
+func defaultNominalMaxRequests(cn requestStrategyConnection) int {
+	return int(
		max(64,
-			cn.stats.ChunksReadUseful.Int64()-(cn.stats.ChunksRead.Int64()-cn.stats.ChunksReadUseful.Int64()))))
+			cn.stats().ChunksReadUseful.Int64()-(cn.stats().ChunksRead.Int64()-cn.stats().ChunksReadUseful.Int64())))
+}
+func (rs requestStrategyOne) nominalMaxRequests(cn requestStrategyConnection) int {
+	return defaultNominalMaxRequests(cn)
+}
+func (rs requestStrategyTwo) nominalMaxRequests(cn requestStrategyConnection) int {
+	return defaultNominalMaxRequests(cn)
}
func (cn *connection) totalExpectingTime() (ret time.Duration) {
}
cn.validReceiveChunks[r] = struct{}{}
cn.t.pendingRequests[r]++
- cn.t.lastRequested[r] = time.AfterFunc(cn.t.duplicateRequestTimeout, func() {
+ cn.t.lastRequested[r] = time.AfterFunc(cn.t._duplicateRequestTimeout, func() {
torrent.Add("duplicate request timeouts", 1)
cn.mu().Lock()
defer cn.mu().Unlock()
Type: pp.Bitfield,
Bitfield: cn.t.bitfield(),
})
- cn.sentHaves = cn.t.completedPieces.Copy()
+ cn.sentHaves = cn.t._completedPieces.Copy()
}
func (cn *connection) updateRequests() {
}
}
-func (cn *connection) iterUnbiasedPieceRequestOrder(f func(piece pieceIndex) bool) bool {
- now, readahead := cn.t.readerPiecePriorities()
- var skip bitmap.Bitmap
- if !cn.peerSentHaveAll {
- // Pieces to skip include pieces the peer doesn't have.
- skip = bitmap.Flip(cn.peerPieces, 0, bitmap.BitIndex(cn.t.numPieces()))
- }
- // And pieces that we already have.
- skip.Union(cn.t.completedPieces)
- skip.Union(cn.t.piecesQueuedForHash)
+func iterUnbiasedPieceRequestOrder(cn requestStrategyConnection, f func(piece pieceIndex) bool) bool {
+ now, readahead := cn.torrent().readerPiecePriorities()
+ skip := bitmap.Flip(cn.peerPieces(), 0, cn.torrent().numPieces())
+ skip.Union(cn.torrent().ignorePieces())
// Return an iterator over the different priority classes, minus the skip pieces.
return iter.All(
func(_piece interface{}) bool {
- i := _piece.(bitmap.BitIndex)
- if cn.t.hashingPiece(pieceIndex(i)) {
- return true
- }
- return f(pieceIndex(i))
+ return f(pieceIndex(_piece.(bitmap.BitIndex)))
},
iterBitmapsDistinct(&skip, now, readahead),
- // We have to iterate pendingPieces separately because it isn't a Bitmap.
+ // We have to iterate _pendingPieces separately because it isn't a Bitmap.
func(cb iter.Callback) {
- cn.t.pendingPieces.IterTyped(func(piece int) bool {
+ cn.torrent().pendingPieces().IterTyped(func(piece int) bool {
if skip.Contains(piece) {
return true
}
// assigned to the highest priority pieces, and assigning more than one this role would cause
// significant wasted bandwidth.
func (cn *connection) shouldRequestWithoutBias() bool {
-	if cn.t.requestStrategy != 2 {
-		return false
-	}
-	if len(cn.t.readers) == 0 {
+	return cn.t.requestStrategy.shouldRequestWithoutBias(cn.requestStrategyConnection())
+}
+
+// Default for strategies that never bypass piece inclinations.
+func defaultShouldRequestWithoutBias(cn requestStrategyConnection) bool {
+	return false
+}
+
+// Strategy two: request without inclination bias only when there is at least
+// one reader and this is the sole connection or the fastest one.
+func (requestStrategyTwo) shouldRequestWithoutBias(cn requestStrategyConnection) bool {
+	if cn.torrent().numReaders() == 0 {
		return false
	}
-	if len(cn.t.conns) == 1 {
+	if cn.torrent().numConns() == 1 {
		return true
	}
-	if cn == cn.t.fastestConn {
+	if cn.fastest() {
		return true
	}
	return false
}
+func (requestStrategyOne) shouldRequestWithoutBias(cn requestStrategyConnection) bool {
+	return defaultShouldRequestWithoutBias(cn)
+}
+
+func (requestStrategyThree) shouldRequestWithoutBias(cn requestStrategyConnection) bool {
+	return defaultShouldRequestWithoutBias(cn)
+}
+
// Iterates pieces this connection could request, in strategy-defined order.
// Returns false if iteration was aborted (or the torrent info is missing).
func (cn *connection) iterPendingPieces(f func(pieceIndex) bool) bool {
	if !cn.t.haveInfo() {
		return false
	}
-	if cn.t.requestStrategy == 3 {
-		return cn.iterUnbiasedPieceRequestOrder(f)
-	}
-	if cn.shouldRequestWithoutBias() {
-		return cn.iterUnbiasedPieceRequestOrder(f)
+	return cn.t.requestStrategy.iterPendingPieces(cn, f)
+}
+// Strategy three always iterates in the unbiased piece-priority order.
+func (requestStrategyThree) iterPendingPieces(cn requestStrategyConnection, f func(pieceIndex) bool) bool {
+	return iterUnbiasedPieceRequestOrder(cn, f)
+}
+// Shared by strategies one and two: unbiased order when the strategy asks
+// for it, otherwise this connection's own (inclination-biased) request order.
+func defaultIterPendingPieces(rs requestStrategy, cn requestStrategyConnection, f func(pieceIndex) bool) bool {
+	if rs.shouldRequestWithoutBias(cn) {
+		return iterUnbiasedPieceRequestOrder(cn, f)
	} else {
-		return cn.pieceRequestOrder.IterTyped(func(i int) bool {
+		return cn.pieceRequestOrder().IterTyped(func(i int) bool {
			return f(pieceIndex(i))
		})
	}
}
+func (rs requestStrategyOne) iterPendingPieces(cn requestStrategyConnection, cb func(pieceIndex) bool) bool {
+	return defaultIterPendingPieces(rs, cn, cb)
+}
+func (rs requestStrategyTwo) iterPendingPieces(cn requestStrategyConnection, cb func(pieceIndex) bool) bool {
+	return defaultIterPendingPieces(rs, cn, cb)
+}
// Adapts iterPendingPieces to the untyped iter.Callback interface.
func (cn *connection) iterPendingPiecesUntyped(f iter.Callback) {
	cn.iterPendingPieces(func(i pieceIndex) bool { return f(i) })
}
// Iterates the candidate chunk requests for a piece, delegating chunk
// selection (including any duplicate-request suppression) to the torrent's
// request strategy.
func (cn *connection) iterPendingRequests(piece pieceIndex, f func(request) bool) bool {
-	return iterUndirtiedChunks(piece, cn.t, func(cs chunkSpec) bool {
-		r := request{pp.Integer(piece), cs}
-		if cn.t.requestStrategy == 3 {
-			if _, ok := cn.t.lastRequested[r]; ok {
-				// This piece has been requested on another connection, and
-				// the duplicate request timer is still running.
-				return true
-			}
-		}
-		return f(r)
-	})
+	return cn.t.requestStrategy.iterUndirtiedChunks(cn.t.piece(piece).requestStrategyPiece(),
+		func(cs chunkSpec) bool {
+			return f(request{pp.Integer(piece), cs})
+		})
}
-func iterUndirtiedChunks(piece pieceIndex, t *Torrent, f func(chunkSpec) bool) bool {
- p := &t.pieces[piece]
- if t.requestStrategy == 3 {
- for i := pp.Integer(0); i < p.numChunks(); i++ {
- if !p.dirtyChunks.Get(bitmap.BitIndex(i)) {
- if !f(t.chunkIndexSpec(i, piece)) {
- return false
- }
- }
+func (requestStrategyThree) iterUndirtiedChunks(p requestStrategyPiece, f func(chunkSpec) bool) bool {
+ for i := pp.Integer(0); i < pp.Integer(p.numChunks()); i++ {
+ if p.dirtyChunks().Get(bitmap.BitIndex(i)) {
+ continue
+ }
+ ci := p.chunkIndexSpec(i)
+ if p.wouldDuplicateRecent(ci) {
+ continue
+ }
+ if !f(p.chunkIndexSpec(i)) {
+ return false
}
- return true
}
- chunkIndices := t.pieces[piece].undirtiedChunkIndices()
+ return true
+}
+
+// Shared by strategies one and two: visit a piece's undirtied chunks in a
+// permuted order (dirty bitmap copied then flipped to get the undirtied set).
+func defaultIterUndirtiedChunks(p requestStrategyPiece, f func(chunkSpec) bool) bool {
+	chunkIndices := p.dirtyChunks().Copy()
+	chunkIndices.FlipRange(0, bitmap.BitIndex(p.numChunks()))
	return iter.ForPerm(chunkIndices.Len(), func(i int) bool {
		ci, err := chunkIndices.RB.Select(uint32(i))
		if err != nil {
			// Selecting within [0, Len()) should never fail; a failure here
			// is a programmer error.
			panic(err)
		}
-		return f(t.chunkIndexSpec(pp.Integer(ci), piece))
+		return f(p.chunkIndexSpec(pp.Integer(ci)))
	})
}
+func (rs requestStrategyOne) iterUndirtiedChunks(p requestStrategyPiece, f func(chunkSpec) bool) bool {
+	return defaultIterUndirtiedChunks(p, f)
+}
+
+func (rs requestStrategyTwo) iterUndirtiedChunks(p requestStrategyPiece, f func(chunkSpec) bool) bool {
+	return defaultIterUndirtiedChunks(p, f)
+}
+
// check callers updaterequests
// Drops the piece from this connection's request order bitmap.
func (cn *connection) stopRequestingPiece(piece pieceIndex) bool {
-	return cn.pieceRequestOrder.Remove(bitmap.BitIndex(piece))
+	return cn._pieceRequestOrder.Remove(bitmap.BitIndex(piece))
}
// This is distinct from Torrent piece priority, which is the user's
return cn.stopRequestingPiece(piece)
}
prio := cn.getPieceInclination()[piece]
- switch cn.t.requestStrategy {
- case 1:
- switch tpp {
- case PiecePriorityNormal:
- case PiecePriorityReadahead:
- prio -= int(cn.t.numPieces())
- case PiecePriorityNext, PiecePriorityNow:
- prio -= 2 * int(cn.t.numPieces())
- default:
- panic(tpp)
- }
- prio += int(piece / 3)
+ prio = cn.t.requestStrategy.piecePriority(cn, piece, tpp, prio)
+ return cn._pieceRequestOrder.Set(bitmap.BitIndex(piece), prio) || cn.shouldRequestWithoutBias()
+}
+
+// Strategy one: bias the base priority by reader urgency (readahead and
+// next/now classes get progressively larger negative offsets), then add
+// piece/3 as fuzzing to reduce overlap and wastage across connections.
+func (requestStrategyOne) piecePriority(cn requestStrategyConnection, piece pieceIndex, tpp piecePriority, prio int) int {
+	switch tpp {
+	case PiecePriorityNormal:
+	case PiecePriorityReadahead:
+		prio -= int(cn.torrent().numPieces())
+	case PiecePriorityNext, PiecePriorityNow:
+		prio -= 2 * int(cn.torrent().numPieces())
	default:
+		// Unknown priority class is a programmer error.
+		panic(tpp)
	}
-	return cn.pieceRequestOrder.Set(bitmap.BitIndex(piece), prio) || cn.shouldRequestWithoutBias()
+	prio += int(piece / 3)
+	return prio
+}
+
+// Strategies two and three leave the caller-supplied priority untouched.
+func defaultPiecePriority(cn requestStrategyConnection, piece pieceIndex, tpp piecePriority, prio int) int {
+	return prio
+}
+
+func (requestStrategyTwo) piecePriority(cn requestStrategyConnection, piece pieceIndex, tpp piecePriority, prio int) int {
+	return defaultPiecePriority(cn, piece, tpp, prio)
+}
+
+func (requestStrategyThree) piecePriority(cn requestStrategyConnection, piece pieceIndex, tpp piecePriority, prio int) int {
+	return defaultPiecePriority(cn, piece, tpp, prio)
+}
func (cn *connection) getPieceInclination() []int {
return nil
}
cn.raisePeerMinPieces(piece + 1)
- cn.peerPieces.Set(bitmap.BitIndex(piece), true)
+ cn._peerPieces.Set(bitmap.BitIndex(piece), true)
if cn.updatePiecePriority(piece) {
cn.updateRequests()
}
if have {
cn.raisePeerMinPieces(pieceIndex(i) + 1)
}
- cn.peerPieces.Set(i, have)
+ cn._peerPieces.Set(i, have)
}
cn.peerPiecesChanged()
return nil
// Peer sent HaveAll: record the flag and drop the explicit bitmap, since the
// piece count may not even be known yet (see the peerSentHaveAll field note).
func (cn *connection) onPeerSentHaveAll() error {
	cn.peerSentHaveAll = true
-	cn.peerPieces.Clear()
+	cn._peerPieces.Clear()
	cn.peerPiecesChanged()
	return nil
}
func (cn *connection) peerSentHaveNone() error {
- cn.peerPieces.Clear()
+ cn._peerPieces.Clear()
cn.peerSentHaveAll = false
cn.peerPiecesChanged()
return nil
// until the handshake is complete, after which it's expected to reconcile the
// differences.
func (cn *connection) allStats(f func(*ConnStats)) {
- f(&cn.stats)
+ f(&cn._stats)
if cn.reconciledHandshakeStats {
cn.postHandshakeStats(f)
}
// Request has been satisfied.
if c.deleteRequest(req) {
if c.expectingChunks() {
- c.chunksReceivedWhileExpecting++
+ c._chunksReceivedWhileExpecting++
}
} else {
torrent.Add("chunks received unwanted", 1)
return false
}
// Don't upload more than 100 KiB more than we download.
- if c.stats.BytesWrittenData.Int64() >= c.stats.BytesReadData.Int64()+100<<10 {
+ if c._stats.BytesWrittenData.Int64() >= c._stats.BytesReadData.Int64()+100<<10 {
return false
}
return true
}
// Good pieces this connection contributed, net of bad ones.
func (cn *connection) netGoodPiecesDirtied() int64 {
-	return cn.stats.PiecesDirtiedGood.Int64() - cn.stats.PiecesDirtiedBad.Int64()
+	return cn._stats.PiecesDirtiedGood.Int64() - cn._stats.PiecesDirtiedBad.Int64()
}
// True while the peer has pieces we still want (our request order is non-empty).
func (c *connection) peerHasWantedPieces() bool {
-	return !c.pieceRequestOrder.IsEmpty()
+	return !c._pieceRequestOrder.IsEmpty()
}
func (c *connection) numLocalRequests() int {
// Less orders connectionTrust values by the Implicit flag first, then by net
// good pieces dirtied.
func (l connectionTrust) Less(r connectionTrust) bool {
	return multiless.New().Bool(l.Implicit, r.Implicit).Int64(l.NetGoodPiecesDirted, r.NetGoodPiecesDirted).Less()
}
+
+
+// The accessors below adapt *connection to the requestStrategyConnection
+// interface; the underscore-prefixed fields are presumably renamed to
+// discourage direct access in favor of these getters.
+func (cn *connection) requestStrategyConnection() requestStrategyConnection {
+	return cn
+}
+
+func (cn *connection) chunksReceivedWhileExpecting() int64 {
+	return cn._chunksReceivedWhileExpecting
+}
+
+// Whether this is the torrent's current fastest connection.
+func (cn *connection) fastest() bool {
+	return cn == cn.t.fastestConn
+}
+
+func (cn *connection) peerMaxRequests() int {
+	return cn.PeerMaxRequests
+}
+
+// peerPieces returns a copy of the peer's claimed pieces; when the peer sent
+// have-all, the entire piece range is added to the copy.
+func (cn *connection) peerPieces() bitmap.Bitmap {
+	ret := cn._peerPieces.Copy()
+	if cn.peerSentHaveAll {
+		ret.AddRange(0, cn.t.numPieces())
+	}
+	return ret
+}
+
+func (cn *connection) pieceRequestOrder() *prioritybitmap.PriorityBitmap {
+	return &cn._pieceRequestOrder
+}
+
+func (cn *connection) stats() *ConnStats {
+	return &cn._stats
+}
+
+// torrent exposes the owning Torrent through the requestStrategyTorrent interface.
+func (cn *connection) torrent() requestStrategyTorrent {
+	return cn.t.requestStrategyTorrent()
+}
"github.com/anacrolix/dht/v2"
"github.com/anacrolix/log"
"github.com/anacrolix/missinggo"
- "github.com/anacrolix/missinggo/bitmap"
"github.com/anacrolix/missinggo/perf"
- "github.com/anacrolix/missinggo/prioritybitmap"
"github.com/anacrolix/missinggo/pubsub"
"github.com/anacrolix/missinggo/slices"
+ "github.com/anacrolix/missinggo/v2/bitmap"
+ "github.com/anacrolix/missinggo/v2/prioritybitmap"
"github.com/anacrolix/torrent/bencode"
"github.com/anacrolix/torrent/metainfo"
networkingEnabled bool
- // Determines what chunks to request from peers. 1: Favour higher priority pieces with some
- // fuzzing to reduce overlaps and wastage across connections. 2: The fastest connection
- // downloads strictly in order of priority, while all others adhere to their piece inclinations.
- // 3: Requests are strictly by piece priority, and not duplicated until duplicateRequestTimeout
- // is reached.
- requestStrategy int
+ // Determines what chunks to request from peers.
+ requestStrategy requestStrategy
// How long to avoid duplicating a pending request.
- duplicateRequestTimeout time.Duration
+ _duplicateRequestTimeout time.Duration
closed missinggo.Event
infoHash metainfo.Hash
// Set when .Info is obtained.
gotMetainfo missinggo.Event
- readers map[*reader]struct{}
- readerNowPieces bitmap.Bitmap
- readerReadaheadPieces bitmap.Bitmap
+ readers map[*reader]struct{}
+ _readerNowPieces bitmap.Bitmap
+ _readerReadaheadPieces bitmap.Bitmap
// A cache of pieces we need to get. Calculated from various piece and
// file priorities and completion states elsewhere.
- pendingPieces prioritybitmap.PriorityBitmap
+ _pendingPieces prioritybitmap.PriorityBitmap
// A cache of completed piece indices.
- completedPieces bitmap.Bitmap
+ _completedPieces bitmap.Bitmap
// Pieces that need to be hashed.
piecesQueuedForHash bitmap.Bitmap
activePieceHashes int
lastRequested map[request]*time.Timer
}
+// Accessors used by the request strategies (via requestStrategyTorrent).
+func (t *Torrent) numConns() int {
+	return len(t.conns)
+}
+
+func (t *Torrent) numReaders() int {
+	return len(t.readers)
+}
+
+func (t *Torrent) readerNowPieces() bitmap.Bitmap {
+	return t._readerNowPieces
+}
+
+func (t *Torrent) readerReadaheadPieces() bitmap.Bitmap {
+	return t._readerReadaheadPieces
+}
+
+// ignorePieces returns the pieces no connection should request: completed,
+// queued for hashing, or currently being hashed.
+func (t *Torrent) ignorePieces() bitmap.Bitmap {
+	ret := t._completedPieces.Copy()
+	ret.Union(t.piecesQueuedForHash)
+	for i := 0; i < t.numPieces(); i++ {
+		if t.piece(i).hashing {
+			ret.Set(i, true)
+		}
+	}
+	return ret
+}
+
+func (t *Torrent) pendingPieces() *prioritybitmap.PriorityBitmap {
+	return &t._pendingPieces
+}
+
+
// Broadcasts on the client-wide event condition; named for waking any
// blocked readers so they re-check state.
func (t *Torrent) tickleReaders() {
	t.cl.event.Broadcast()
}
}
// Whether the piece is marked complete in the cached completion bitmap.
func (t *Torrent) pieceComplete(piece pieceIndex) bool {
-	return t.completedPieces.Get(bitmap.BitIndex(piece))
+	return t._completedPieces.Get(bitmap.BitIndex(piece))
}
func (t *Torrent) pieceCompleteUncached(piece pieceIndex) storage.Completion {
}
func (t *Torrent) bytesLeft() (left int64) {
- bitmap.Flip(t.completedPieces, 0, bitmap.BitIndex(t.numPieces())).IterTyped(func(piece int) bool {
+ bitmap.Flip(t._completedPieces, 0, bitmap.BitIndex(t.numPieces())).IterTyped(func(piece int) bool {
p := &t.pieces[piece]
left += int64(p.length() - p.numDirtyBytes())
return true
}
// Count of pieces marked complete in the cached completion bitmap.
func (t *Torrent) numPiecesCompleted() (num int) {
-	return t.completedPieces.Len()
+	return t._completedPieces.Len()
}
func (t *Torrent) close() (err error) {
func (t *Torrent) bitfield() (bf []bool) {
bf = make([]bool, t.numPieces())
- t.completedPieces.IterTyped(func(piece int) (again bool) {
+ t._completedPieces.IterTyped(func(piece int) (again bool) {
bf[piece] = true
return true
})
}
// Clears the piece's dirty-chunk bitmap so all of its chunks become pending again.
func (t *Torrent) pendAllChunkSpecs(pieceIndex pieceIndex) {
-	t.pieces[pieceIndex].dirtyChunks.Clear()
+	t.pieces[pieceIndex]._dirtyChunks.Clear()
}
func (t *Torrent) pieceLength(piece pieceIndex) pp.Integer {
}
// True if at least one piece is complete.
func (t *Torrent) haveAnyPieces() bool {
-	return t.completedPieces.Len() != 0
+	return t._completedPieces.Len() != 0
}
// True only once the info is known and every piece is complete.
func (t *Torrent) haveAllPieces() bool {
	if !t.haveInfo() {
		// Piece count is unknown without the info.
		return false
	}
-	return t.completedPieces.Len() == bitmap.BitIndex(t.numPieces())
+	return t._completedPieces.Len() == bitmap.BitIndex(t.numPieces())
}
func (t *Torrent) havePiece(index pieceIndex) bool {
if t.pieceComplete(index) {
return false
}
- if t.pendingPieces.Contains(bitmap.BitIndex(index)) {
+ if t._pendingPieces.Contains(bitmap.BitIndex(index)) {
return true
}
// t.logger.Printf("piece %d not pending", index)
heap.Init(&wcs)
for wcs.Len() != 0 {
c := heap.Pop(&wcs).(*connection)
- if c.stats.ChunksReadWasted.Int64() >= 6 && c.stats.ChunksReadWasted.Int64() > c.stats.ChunksReadUseful.Int64() {
+ if c._stats.ChunksReadWasted.Int64() >= 6 && c._stats.ChunksReadWasted.Int64() > c._stats.ChunksReadUseful.Int64() {
return c
}
// If the connection is in the worst half of the established
}
// True when every chunk of the piece has been dirtied (written).
func (t *Torrent) pieceAllDirty(piece pieceIndex) bool {
-	return t.pieces[piece].dirtyChunks.Len() == int(t.pieceNumChunks(piece))
+	return t.pieces[piece]._dirtyChunks.Len() == int(t.pieceNumChunks(piece))
}
func (t *Torrent) readersChanged() {
}
// Refreshes the cached now/readahead reader-piece bitmaps from reader positions.
func (t *Torrent) updateReaderPieces() {
-	t.readerNowPieces, t.readerReadaheadPieces = t.readerPiecePriorities()
+	t._readerNowPieces, t._readerReadaheadPieces = t.readerPiecePriorities()
}
func (t *Torrent) readerPosChanged(from, to pieceRange) {
newPrio := p.uncachedPriority()
// t.logger.Printf("torrent %p: piece %d: uncached priority: %v", t, piece, newPrio)
if newPrio == PiecePriorityNone {
- if !t.pendingPieces.Remove(bitmap.BitIndex(piece)) {
+ if !t._pendingPieces.Remove(bitmap.BitIndex(piece)) {
return
}
} else {
- if !t.pendingPieces.Set(bitmap.BitIndex(piece), newPrio.BitmapPriority()) {
+ if !t._pendingPieces.Set(bitmap.BitIndex(piece), newPrio.BitmapPriority()) {
return
}
}
}
func (t *Torrent) piecePriority(piece pieceIndex) piecePriority {
- prio, ok := t.pendingPieces.GetPriority(bitmap.BitIndex(piece))
+ prio, ok := t._pendingPieces.GetPriority(bitmap.BitIndex(piece))
if !ok {
return PiecePriorityNone
}
changed := cached != uncached
complete := uncached.Complete
p.storageCompletionOk = uncached.Ok
- t.completedPieces.Set(bitmap.BitIndex(piece), complete)
+ t._completedPieces.Set(bitmap.BitIndex(piece), complete)
if complete && len(p.dirtiers) != 0 {
t.logger.Printf("marked piece %v complete but still has dirtiers", piece)
}
if !t.haveInfo() {
return true
}
- return t.pendingPieces.Len() != 0
+ return t._pendingPieces.Len() != 0
}
func appendMissingStrings(old, new []string) (ret []string) {
// Reconcile bytes transferred before connection was associated with a
// torrent.
func (t *Torrent) reconcileHandshakeStats(c *connection) {
- if c.stats != (ConnStats{
+ if c._stats != (ConnStats{
// Handshakes should only increment these fields:
- BytesWritten: c.stats.BytesWritten,
- BytesRead: c.stats.BytesRead,
+ BytesWritten: c._stats.BytesWritten,
+ BytesRead: c._stats.BytesRead,
}) {
panic("bad stats")
}
c.postHandshakeStats(func(cs *ConnStats) {
- cs.BytesRead.Add(c.stats.BytesRead.Int64())
- cs.BytesWritten.Add(c.stats.BytesWritten.Int64())
+ cs.BytesRead.Add(c._stats.BytesRead.Int64())
+ cs.BytesWritten.Add(c._stats.BytesWritten.Int64())
})
c.reconciledHandshakeStats = true
}
t.allStats((*ConnStats).incrementPiecesDirtiedGood)
}
for c := range p.dirtiers {
- c.stats.incrementPiecesDirtiedGood()
+ c._stats.incrementPiecesDirtiedGood()
}
t.clearPieceTouchers(piece)
err := p.Storage().MarkComplete()
t.allStats((*ConnStats).incrementPiecesDirtiedBad)
for c := range p.dirtiers {
// Y u do dis peer?!
- c.stats.incrementPiecesDirtiedBad()
+ c._stats.incrementPiecesDirtiedBad()
}
bannableTouchers := make([]*connection, 0, len(p.dirtiers))
// piece returns a pointer to the i'th piece.
func (t *Torrent) piece(i int) *Piece {
	return &t.pieces[i]
}
+
+// Exposes the torrent through the requestStrategyTorrent interface.
+func (t *Torrent) requestStrategyTorrent() requestStrategyTorrent {
+	return t
+}
+
+// Getter for the underscore-renamed duplicate request timeout field.
+func (t *Torrent) duplicateRequestTimeout() time.Duration {
+	return t._duplicateRequestTimeout
+}