"errors"
"fmt"
"io"
+ "math/rand"
+ "net/netip"
"net/url"
"sort"
"strings"
"github.com/anacrolix/chansync"
"github.com/anacrolix/chansync/events"
"github.com/anacrolix/dht/v2"
+ . "github.com/anacrolix/generics"
"github.com/anacrolix/log"
"github.com/anacrolix/missinggo/perf"
- "github.com/anacrolix/missinggo/pubsub"
"github.com/anacrolix/missinggo/slices"
"github.com/anacrolix/missinggo/v2"
"github.com/anacrolix/missinggo/v2/bitmap"
+ "github.com/anacrolix/missinggo/v2/pubsub"
"github.com/anacrolix/multiless"
"github.com/anacrolix/sync"
"github.com/davecgh/go-spew/spew"
"github.com/anacrolix/torrent/common"
"github.com/anacrolix/torrent/metainfo"
pp "github.com/anacrolix/torrent/peer_protocol"
+ request_strategy "github.com/anacrolix/torrent/request-strategy"
"github.com/anacrolix/torrent/segments"
"github.com/anacrolix/torrent/storage"
"github.com/anacrolix/torrent/tracker"
+ typedRoaring "github.com/anacrolix/torrent/typed-roaring"
"github.com/anacrolix/torrent/webseed"
"github.com/anacrolix/torrent/webtorrent"
)
userOnWriteChunkErr func(error)
closed chansync.SetOnce
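+ // Funcs run once, in order, when the Torrent closes, before storage is closed and peers are dropped.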
+ onClose []func()
infoHash metainfo.Hash
pieces []Piece
+
+ // The order pieces are requested if there's no stronger reason like availability or priority.
+ pieceRequestOrder []int
// Values are the piece indices that changed.
- pieceStateChanges *pubsub.PubSub
+ pieceStateChanges pubsub.PubSub[PieceStateChange]
// The size of chunks to request from peers over the wire. This is
// normally 16KiB by convention these days.
chunkSize pp.Integer
chunkPool sync.Pool
// Total length of the torrent in bytes. Stored because it's not O(1) to
// get this from the info dict.
- length *int64
+ _length Option[int64]
// The storage to open when the info dict becomes available.
storageOpener *storage.Client
fileIndex segments.Index
files *[]*File
- webSeeds map[string]*Peer
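+ // Cached result of chunksPerRegularPiece, computed when the info is set.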
+ _chunksPerRegularPiece chunkIndexType
+ webSeeds map[string]*Peer
// Active peer connections, running message stream loops. TODO: Make this
// open (not-closed) connections only.
conns map[*PeerConn]struct{}
activePieceHashes int
initialPieceCheckDisabled bool
- // Count of each request across active connections.
- pendingRequests pendingRequests
+ connsWithAllPieces map[*Peer]struct{}
+
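+ // Outstanding chunk requests, keyed by request index: the peer assigned to each chunk and when it was assigned.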
+ requestState map[RequestIndex]requestState
// Chunks we've written to since the corresponding piece was last checked.
- dirtyChunks roaring.Bitmap
+ dirtyChunks typedRoaring.Bitmap[RequestIndex]
pex pexState
// Is On when all pieces are complete.
Complete chansync.Flag
+
+ // Torrent sources in use keyed by the source string.
+ activeSources sync.Map
+ sourcesLogger log.Logger
+
+ smartBanCache smartBanCache
+
+ // Large allocations reused between request state updates.
+ requestPieceStates []request_strategy.PieceRequestOrderState
+ requestIndexes []RequestIndex
+}
+
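+ // The length of the torrent in bytes. Only meaningful once the info is obtained, since _length is set from it.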
+func (t *Torrent) length() int64 {
+ return t._length.Value
}
-func (t *Torrent) pieceAvailabilityFromPeers(i pieceIndex) (count int) {
+func (t *Torrent) selectivePieceAvailabilityFromPeers(i pieceIndex) (count int) {
+ // This could be done with roaring.BitSliceIndexing.
t.iterPeers(func(peer *Peer) {
+ if _, ok := t.connsWithAllPieces[peer]; ok {
+ return
+ }
if peer.peerHasPiece(i) {
count++
}
})
return
}
p := t.piece(i)
- if p.availability <= 0 {
- panic(p.availability)
+ if p.relativeAvailability <= 0 {
+ panic(p.relativeAvailability)
}
- p.availability--
+ p.relativeAvailability--
+ t.updatePieceRequestOrder(i)
}
func (t *Torrent) incPieceAvailability(i pieceIndex) {
// If we don't have the info, this should be reconciled when we do.
if t.haveInfo() {
p := t.piece(i)
- p.availability++
+ p.relativeAvailability++
+ t.updatePieceRequestOrder(i)
}
}
}
func (t *Torrent) pieceCompleteUncached(piece pieceIndex) storage.Completion {
+ if t.storage == nil {
+ return storage.Completion{Complete: false, Ok: true}
+ }
return t.pieces[piece].Storage().Completion()
}
}
func (t *Torrent) appendUnclosedConns(ret []*PeerConn) []*PeerConn {
+ return t.appendConns(ret, func(conn *PeerConn) bool {
+ return !conn.closed.IsSet()
+ })
+}
+
+func (t *Torrent) appendConns(ret []*PeerConn, f func(*PeerConn) bool) []*PeerConn {
for c := range t.conns {
- if !c.closed.IsSet() {
+ if f(c) {
ret = append(ret, c)
}
}
beginFile := pieceFirstFileIndex(piece.torrentBeginOffset(), files)
endFile := pieceEndFileIndex(piece.torrentEndOffset(), files)
piece.files = files[beginFile:endFile]
- piece.undirtiedChunksIter = undirtiedChunksIter{
- TorrentDirtyChunks: &t.dirtyChunks,
- StartRequestIndex: piece.requestIndexOffset(),
- EndRequestIndex: piece.requestIndexOffset() + piece.numChunks(),
- }
}
}
for _, f := range t.info.UpvertedFiles() {
l += f.Length
}
- t.length = &l
+ t._length = Some(l)
}
// TODO: This shouldn't fail for storage reasons. Instead we should handle storage failure
t.nameMu.Lock()
t.info = info
t.nameMu.Unlock()
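+ // Ceiling division: a regular piece whose length isn't an exact multiple of chunkSize gets one extra, short chunk.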
+ t._chunksPerRegularPiece = chunkIndexType((pp.Integer(t.usualPieceSize()) + t.chunkSize - 1) / t.chunkSize)
t.updateComplete()
t.fileIndex = segments.NewIndex(common.LengthIterFromUpvertedFiles(info.UpvertedFiles()))
t.displayName = "" // Save a few bytes lol.
return nil
}
+func (t *Torrent) pieceRequestOrderKey(i int) request_strategy.PieceRequestOrderKey {
+ return request_strategy.PieceRequestOrderKey{
+ InfoHash: t.infoHash,
+ Index: i,
+ }
+}
+
// This seems to be all the follow-up tasks after info is set, that can't fail.
func (t *Torrent) onSetInfo() {
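+ // Pieces are requested in a random order when nothing stronger (availability, priority) applies.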
+ t.pieceRequestOrder = rand.Perm(t.numPieces())
+ t.initPieceRequestOrder()
+ MakeSliceWithLength(&t.requestPieceStates, t.numPieces())
for i := range t.pieces {
p := &t.pieces[i]
- // Need to add availability before updating piece completion, as that may result in conns
+ // Need to add relativeAvailability before updating piece completion, as that may result in conns
// being dropped.
- if p.availability != 0 {
- panic(p.availability)
+ if p.relativeAvailability != 0 {
+ panic(p.relativeAvailability)
}
- p.availability = int64(t.pieceAvailabilityFromPeers(i))
- t.updatePieceCompletion(pieceIndex(i))
+ p.relativeAvailability = t.selectivePieceAvailabilityFromPeers(i)
+ t.addRequestOrderPiece(i)
+ t.updatePieceCompletion(i)
if !t.initialPieceCheckDisabled && !p.storageCompletionOk {
// t.logger.Printf("piece %s completion unknown, queueing check", p)
- t.queuePieceCheck(pieceIndex(i))
+ t.queuePieceCheck(i)
}
}
t.cl.event.Broadcast()
close(t.gotMetainfoC)
t.updateWantPeersEvent()
- t.pendingRequests.Init(t.numRequests())
+ t.requestState = make(map[RequestIndex]requestState)
t.tryCreateMorePieceHashers()
t.iterPeers(func(p *Peer) {
p.onGotInfo(t.info)
return
}
if uint32(size) > maxMetadataSize {
- return errors.New("bad size")
+ return log.WithLevel(log.Warning, errors.New("bad size"))
}
if len(t.metadataBytes) == size {
return
t.nameMu.RLock()
defer t.nameMu.RUnlock()
if t.haveInfo() {
- return t.info.Name
+ return t.info.BestName()
}
if t.displayName != "" {
return t.displayName
}
type pieceAvailabilityRun struct {
- count pieceIndex
- availability int64
+ Count pieceIndex
+ Availability int
}
func (me pieceAvailabilityRun) String() string {
- return fmt.Sprintf("%v(%v)", me.count, me.availability)
+ return fmt.Sprintf("%v(%v)", me.Count, me.Availability)
}
func (t *Torrent) pieceAvailabilityRuns() (ret []pieceAvailabilityRun) {
rle := missinggo.NewRunLengthEncoder(func(el interface{}, count uint64) {
- ret = append(ret, pieceAvailabilityRun{availability: el.(int64), count: int(count)})
+ ret = append(ret, pieceAvailabilityRun{Availability: el.(int), Count: int(count)})
})
for i := range t.pieces {
- rle.Append(t.pieces[i].availability, 1)
+ rle.Append(t.pieces[i].availability(), 1)
}
rle.Flush()
return
}
+func (t *Torrent) pieceAvailabilityFrequencies() (freqs []int) {
+ freqs = make([]int, t.numActivePeers()+1)
+ for i := range t.pieces {
+ freqs[t.piece(i).availability()]++
+ }
+ return
+}
+
func (t *Torrent) pieceStateRuns() (ret PieceStateRuns) {
rle := missinggo.NewRunLengthEncoder(func(el interface{}, count uint64) {
ret = append(ret, PieceStateRun{
if t.info != nil {
fmt.Fprintf(w, "Num Pieces: %d (%d completed)\n", t.numPieces(), t.numPiecesCompleted())
fmt.Fprintf(w, "Piece States: %s\n", t.pieceStateRuns())
- fmt.Fprintf(w, "Piece availability: %v\n", strings.Join(func() (ret []string) {
- for _, run := range t.pieceAvailabilityRuns() {
- ret = append(ret, run.String())
- }
- return
- }(), " "))
+ // Generates a huge, unhelpful listing when piece availability is very scattered. Prefer
+ // availability frequencies instead.
+ if false {
+ fmt.Fprintf(w, "Piece availability: %v\n", strings.Join(func() (ret []string) {
+ for _, run := range t.pieceAvailabilityRuns() {
+ ret = append(ret, run.String())
+ }
+ return
+ }(), " "))
+ }
+ fmt.Fprintf(w, "Piece availability frequency: %v\n", strings.Join(
+ func() (ret []string) {
+ for avail, freq := range t.pieceAvailabilityFrequencies() {
+ if freq == 0 {
+ continue
+ }
+ ret = append(ret, fmt.Sprintf("%v: %v", avail, freq))
+ }
+ return
+ }(),
+ ", "))
}
fmt.Fprintf(w, "Reader Pieces:")
t.forReaderOffsetPieces(func(begin, end pieceIndex) (again bool) {
}
func (t *Torrent) numPieces() pieceIndex {
- return pieceIndex(t.info.NumPieces())
+ return t.info.NumPieces()
}
func (t *Torrent) numPiecesCompleted() (num pieceIndex) {
}
func (t *Torrent) close(wg *sync.WaitGroup) (err error) {
- t.closed.Set()
+ if !t.closed.Set() {
+ err = errors.New("already closed")
+ return
+ }
+ for _, f := range t.onClose {
+ f()
+ }
if t.storage != nil {
wg.Add(1)
go func() {
t.iterPeers(func(p *Peer) {
p.close()
})
+ if t.storage != nil {
+ t.deletePieceRequestOrder()
+ }
+ for i := range t.pieces {
+ p := t.piece(i)
+ if p.relativeAvailability != 0 {
+ panic(fmt.Sprintf("piece %v has relative availability %v", i, p.relativeAvailability))
+ }
+ }
t.pex.Reset()
t.cl.event.Broadcast()
t.pieceStateChanges.Close()
}
func (t *Torrent) requestOffset(r Request) int64 {
- return torrentRequestOffset(*t.length, int64(t.usualPieceSize()), r)
+ return torrentRequestOffset(t.length(), int64(t.usualPieceSize()), r)
}
// Return the request that would include the given offset into the torrent data. Returns !ok if
// there is no such request.
func (t *Torrent) offsetRequest(off int64) (req Request, ok bool) {
- return torrentOffsetRequest(*t.length, t.info.PieceLength, int64(t.chunkSize), off)
+ return torrentOffsetRequest(t.length(), t.info.PieceLength, int64(t.chunkSize), off)
}
func (t *Torrent) writeChunk(piece int, begin int64, data []byte) (err error) {
return chunkIndexType((t.pieceLength(piece) + t.chunkSize - 1) / t.chunkSize)
}
-func (t *Torrent) chunksPerRegularPiece() uint32 {
- return uint32((pp.Integer(t.usualPieceSize()) + t.chunkSize - 1) / t.chunkSize)
+func (t *Torrent) chunksPerRegularPiece() chunkIndexType {
+ return t._chunksPerRegularPiece
}
-func (t *Torrent) numRequests() RequestIndex {
+func (t *Torrent) numChunks() RequestIndex {
if t.numPieces() == 0 {
return 0
}
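+ // All pieces but the last are regular-sized; the last piece may contain fewer chunks.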
- return uint32(t.numPieces()-1)*t.chunksPerRegularPiece() + t.pieceNumChunks(t.numPieces()-1)
+ return RequestIndex(t.numPieces()-1)*t.chunksPerRegularPiece() + t.pieceNumChunks(t.numPieces()-1)
}
func (t *Torrent) pendAllChunkSpecs(pieceIndex pieceIndex) {
return 0
}
if piece == t.numPieces()-1 {
- ret := pp.Integer(*t.length % t.info.PieceLength)
+ ret := pp.Integer(t.length() % t.info.PieceLength)
if ret != 0 {
return ret
}
return pp.Integer(t.info.PieceLength)
}
-func (t *Torrent) hashPiece(piece pieceIndex) (ret metainfo.Hash, err error) {
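+ // Returns a writer that compares blocks written during hashing against the smart ban cache, so peers that sent differing data can be identified.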
+func (t *Torrent) smartBanBlockCheckingWriter(piece pieceIndex) *blockCheckingWriter {
+ return &blockCheckingWriter{
+ cache: &t.smartBanCache,
+ requestIndex: t.pieceRequestIndexOffset(piece),
+ chunkSize: t.chunkSize.Int(),
+ }
+}
+
+func (t *Torrent) hashPiece(piece pieceIndex) (
+ ret metainfo.Hash,
+ // These are peers that sent us blocks that differ from what we hash here.
+ differingPeers map[bannableAddr]struct{},
+ err error,
+) {
p := t.piece(piece)
p.waitNoPendingWrites()
storagePiece := t.pieces[piece].Storage()
hash := pieceHash.New()
const logPieceContents = false
+ smartBanWriter := t.smartBanBlockCheckingWriter(piece)
+ writers := []io.Writer{hash, smartBanWriter}
+ var examineBuf bytes.Buffer
if logPieceContents {
- var examineBuf bytes.Buffer
- _, err = storagePiece.WriteTo(io.MultiWriter(hash, &examineBuf))
- log.Printf("hashed %q with copy err %v", examineBuf.Bytes(), err)
- } else {
- _, err = storagePiece.WriteTo(hash)
+ writers = append(writers, &examineBuf)
}
+ _, err = storagePiece.WriteTo(io.MultiWriter(writers...))
+ if logPieceContents {
+ t.logger.WithDefaultLevel(log.Debug).Printf("hashed %q with copy err %v", examineBuf.Bytes(), err)
+ }
+ smartBanWriter.Flush()
+ differingPeers = smartBanWriter.badPeers
missinggo.CopyExact(&ret, hash.Sum(nil))
return
}
func (t *Torrent) haveAnyPieces() bool {
- return t._completedPieces.GetCardinality() != 0
+ return !t._completedPieces.IsEmpty()
}
func (t *Torrent) haveAllPieces() bool {
// conns (which is a map).
var peerConnSlices sync.Pool
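+ // Returns a zero-length slice from the pool if one is available, else allocates one with the given capacity. Callers return slices with peerConnSlices.Put.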
+func getPeerConnSlice(cap int) []*PeerConn {
+ getInterface := peerConnSlices.Get()
+ if getInterface == nil {
+ return make([]*PeerConn, 0, cap)
+ } else {
+ return getInterface.([]*PeerConn)[:0]
+ }
+}
+
// The worst connection is one that hasn't been sent, or sent anything useful for the longest. A bad
// connection is one that usually sends us unwanted pieces, or has been in the worse half of the
// established connections for more than a minute. This is O(n log n). If there was a way to not
// consider the position of a conn relative to the total number, it could be reduced to O(n).
func (t *Torrent) worstBadConn() (ret *PeerConn) {
- var sl []*PeerConn
- getInterface := peerConnSlices.Get()
- if getInterface == nil {
- sl = make([]*PeerConn, 0, len(t.conns))
- } else {
- sl = getInterface.([]*PeerConn)[:0]
- }
- sl = t.appendUnclosedConns(sl)
- defer peerConnSlices.Put(sl)
- wcs := worseConnSlice{sl}
+ wcs := worseConnSlice{conns: t.appendUnclosedConns(getPeerConnSlice(len(t.conns)))}
+ defer peerConnSlices.Put(wcs.conns)
+ wcs.initKeys()
heap.Init(&wcs)
for wcs.Len() != 0 {
c := heap.Pop(&wcs).(*PeerConn)
func (t *Torrent) piecePriorityChanged(piece pieceIndex, reason string) {
if t._pendingPieces.Contains(uint32(piece)) {
t.iterPeers(func(c *Peer) {
- if c.actualRequestState.Interested {
- return
- }
+ // if c.requestState.Interested {
+ // return
+ // }
if !c.isLowOnRequests() {
return
}
if !c.peerHasPiece(piece) {
return
}
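+ // While choked, only allowed-fast pieces can actually be requested, so a priority change elsewhere doesn't warrant an update for an already-interested peer.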
+ if c.requestState.Interested && c.peerChoking && !c.peerAllowedFast.Contains(piece) {
+ return
+ }
c.updateRequests(reason)
})
}
}
func (t *Torrent) updatePiecePriority(piece pieceIndex, reason string) {
+ if !t.closed.IsSet() {
+ // It would be possible to filter on pure-priority changes here to avoid churning the piece
+ // request order.
+ t.updatePieceRequestOrder(piece)
+ }
p := &t.pieces[piece]
newPrio := p.uncachedPriority()
// t.logger.Printf("torrent %p: piece %d: uncached priority: %v", t, piece, newPrio)
// Returns the range of pieces [begin, end) that contains the extent of bytes.
func (t *Torrent) byteRegionPieces(off, size int64) (begin, end pieceIndex) {
- if off >= *t.length {
+ if off >= t.length() {
return
}
if off < 0 {
}
func (t *Torrent) pendRequest(req RequestIndex) {
- t.piece(int(req / t.chunksPerRegularPiece())).pendChunkIndex(req % t.chunksPerRegularPiece())
+ t.piece(t.pieceIndexOfRequestIndex(req)).pendChunkIndex(req % t.chunksPerRegularPiece())
}
func (t *Torrent) pieceCompletionChanged(piece pieceIndex, reason string) {
} else {
t._completedPieces.Remove(x)
}
+ p.t.updatePieceRequestOrder(piece)
t.updateComplete()
if complete && len(p.dirtiers) != 0 {
t.logger.Printf("marked piece %v complete but still has dirtiers", piece)
}
if changed {
- log.Fstr("piece %d completion changed: %+v -> %+v", piece, cached, uncached).SetLevel(log.Debug).Log(t.logger)
+ log.Fstr("piece %d completion changed: %+v -> %+v", piece, cached, uncached).LogLevel(log.Debug, t.logger)
t.pieceCompletionChanged(piece, "Torrent.updatePieceCompletion")
}
return changed
if !t.haveInfo() {
return 0
}
- return *t.length - t.bytesLeft()
+ return t.length() - t.bytesLeft()
}
func (t *Torrent) SetInfoBytes(b []byte) (err error) {
}
}
torrent.Add("deleted connections", 1)
- c.deleteAllRequests()
+ c.deleteAllRequests("Torrent.deletePeerConn")
t.assertPendingRequests()
+ if t.numActivePeers() == 0 && len(t.connsWithAllPieces) != 0 {
+ panic(t.connsWithAllPieces)
+ }
return
}
func (t *Torrent) decPeerPieceAvailability(p *Peer) {
+ if t.deleteConnWithAllPieces(p) {
+ return
+ }
if !t.haveInfo() {
return
}
- p.newPeerPieces().Iterate(func(i uint32) bool {
+ p.peerPieces().Iterate(func(i uint32) bool {
p.t.decPieceAvailability(pieceIndex(i))
return true
})
}
// var actual pendingRequests
// if t.haveInfo() {
- // actual.m = make([]int, t.numRequests())
+ // actual.m = make([]int, t.numChunks())
// }
// t.iterPeers(func(p *Peer) {
- // p.actualRequestState.Requests.Iterate(func(x uint32) bool {
+ // p.requestState.Requests.Iterate(func(x uint32) bool {
// actual.Inc(x)
// return true
// })
dcc webtorrent.DataChannelContext,
) {
defer c.Close()
+ netConn := webrtcNetConn{
+ ReadWriteCloser: c,
+ DataChannelContext: dcc,
+ }
+ peerRemoteAddr := netConn.RemoteAddr()
+ //t.logger.Levelf(log.Critical, "onWebRtcConn remote addr: %v", peerRemoteAddr)
+ if t.cl.badPeerAddr(peerRemoteAddr) {
+ return
+ }
+ localAddrIpPort := missinggo.IpPortFromNetAddr(netConn.LocalAddr())
pc, err := t.cl.initiateProtocolHandshakes(
context.Background(),
- webrtcNetConn{c, dcc},
+ netConn,
t,
- dcc.LocalOffered,
false,
- webrtcNetAddr{dcc.Remote},
- webrtcNetwork,
- fmt.Sprintf("webrtc offer_id %x", dcc.OfferId),
+ newConnectionOpts{
+ outgoing: dcc.LocalOffered,
+ remoteAddr: peerRemoteAddr,
+ localPublicAddr: localAddrIpPort,
+ network: webrtcNetwork,
+ connString: fmt.Sprintf("webrtc offer_id %x: %v", dcc.OfferId, regularNetConnPeerConnConnString(netConn)),
+ },
)
if err != nil {
t.logger.WithDefaultLevel(log.Error).Printf("error in handshaking webrtc connection: %v", err)
defer t.cl.unlock()
err = t.cl.runHandshookConn(pc, t)
if err != nil {
- t.logger.WithDefaultLevel(log.Critical).Printf("error running handshook webrtc conn: %v", err)
+ t.logger.WithDefaultLevel(log.Debug).Printf("error running handshook webrtc conn: %v", err)
}
}
func (t *Torrent) logRunHandshookConn(pc *PeerConn, logAll bool, level log.Level) {
err := t.cl.runHandshookConn(pc, t)
if err != nil || logAll {
- t.logger.WithDefaultLevel(level).Printf("error running handshook conn: %v", err)
+ t.logger.WithDefaultLevel(level).Levelf(log.ErrorLevel(err), "error running handshook conn: %v", err)
}
}
}
func (t *Torrent) startWebsocketAnnouncer(u url.URL) torrentTrackerAnnouncer {
- wtc, release := t.cl.websocketTrackers.Get(u.String())
- go func() {
- <-t.closed.Done()
- release()
- }()
+ wtc, release := t.cl.websocketTrackers.Get(u.String(), t.infoHash)
+ // This needs to run before the Torrent is dropped from the Client, to prevent a new
+ // webtorrent.TrackerClient being created for the same info hash before the old one is cleaned up.
+ t.onClose = append(t.onClose, release)
wst := websocketTrackerStatus{u, wtc}
go func() {
err := wtc.Announce(tracker.Started, t.infoHash)
Event: event,
NumWant: func() int32 {
if t.wantPeers() && len(t.cl.dialers) > 0 {
- return -1
+ return 200 // Windows has a UDP packet size limit. See: https://github.com/anacrolix/torrent/issues/764
} else {
return 0
}
if t.closed.IsSet() {
return
}
- if !t.wantPeers() {
+ // We're also announcing ourselves as a listener, so we don't just want peer addresses.
+ // TODO: We can include the announce_peer step depending on whether we can receive
+ // inbound connections. We should probably only announce once every 15 mins too.
+ if !t.wantConns() {
goto wait
}
// TODO: Determine if there's a listener on the port we're announcing.
if !t.cl.config.DropDuplicatePeerIds {
continue
}
- if left, ok := c.hasPreferredNetworkOver(c0); ok && left {
+ if c.hasPreferredNetworkOver(c0) {
c0.close()
t.deletePeerConn(c0)
} else {
if t.closed.IsSet() {
return false
}
- if len(t.conns) >= t.maxEstablishedConns && t.worstBadConn() == nil {
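+ // There's no point holding conns unless we need data, or we're seeding and have something to offer.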
+ if !t.needData() && (!t.seeding() || !t.haveAnyPieces()) {
return false
}
- if t.seeding() && t.haveAnyPieces() {
- return true
- }
- return t.needData()
+ return len(t.conns) < t.maxEstablishedConns || t.worstBadConn() != nil
}
func (t *Torrent) SetMaxEstablishedConns(max int) (oldMax int) {
defer t.cl.unlock()
oldMax = t.maxEstablishedConns
t.maxEstablishedConns = max
- wcs := slices.HeapInterface(slices.FromMapKeys(t.conns), func(l, r *PeerConn) bool {
- return worseConn(&l.Peer, &r.Peer)
- })
+ wcs := worseConnSlice{
+ conns: t.appendConns(nil, func(*PeerConn) bool {
+ return true
+ }),
+ }
+ wcs.initKeys()
+ heap.Init(&wcs)
for len(t.conns) > t.maxEstablishedConns && wcs.Len() > 0 {
- t.dropConnection(wcs.Pop().(*PeerConn))
+ t.dropConnection(heap.Pop(&wcs).(*PeerConn))
}
t.openNewConns()
return oldMax
}
func (t *Torrent) pieceHashed(piece pieceIndex, passed bool, hashIoErr error) {
- t.logger.Log(log.Fstr("hashed piece %d (passed=%t)", piece, passed).SetLevel(log.Debug))
+ t.logger.LazyLog(log.Debug, func() log.Msg {
+ return log.Fstr("hashed piece %d (passed=%t)", piece, passed)
+ })
p := t.piece(piece)
p.numVerifies++
t.cl.event.Broadcast()
} else {
log.Fmsg(
"piece %d failed hash: %d connections contributed", piece, len(p.dirtiers),
- ).AddValues(t, p).SetLevel(log.Debug).Log(t.logger)
+ ).AddValues(t, p).LogLevel(log.Debug, t.logger)
pieceHashedNotCorrect.Add(1)
}
}
c._stats.incrementPiecesDirtiedGood()
}
t.clearPieceTouchers(piece)
+ hasDirty := p.hasDirtyChunks()
t.cl.unlock()
+ if hasDirty {
+ p.Flush() // You can be synchronous here!
+ }
err := p.Storage().MarkComplete()
if err != nil {
t.logger.Printf("%T: error marking piece complete %d: %s", t.storage, piece, err)
if len(bannableTouchers) >= 1 {
c := bannableTouchers[0]
- t.cl.banPeerIP(c.remoteIp())
- c.drop()
+ if len(bannableTouchers) != 1 {
+ t.logger.Levelf(log.Warning, "would have banned %v for touching piece %v after failed piece check", c.remoteIp(), piece)
+ } else {
+ // Turns out it's still useful to ban peers like this because if there's only a
+ // single peer for a piece, and we never progress that piece to completion, we
+ // will never smart-ban them. Discovered in
+ // https://github.com/anacrolix/torrent/issues/715.
+ t.logger.Levelf(log.Warning, "banning %v for being sole dirtier of piece %v after failed piece check", c, piece)
+ c.ban()
+ }
}
}
t.onIncompletePiece(piece)
}
func (t *Torrent) cancelRequestsForPiece(piece pieceIndex) {
- // TODO: Make faster
- for cn := range t.conns {
- cn.tickleWriter()
+ start := t.pieceRequestIndexOffset(piece)
+ end := start + t.pieceNumChunks(piece)
+ for ri := start; ri < end; ri++ {
+ t.cancelRequest(ri)
}
}
return
}
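+ // Drops connected peers whose remote IP is in the client's bad peer set, e.g. after smart banning.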
+func (t *Torrent) dropBannedPeers() {
+ t.iterPeers(func(p *Peer) {
+ remoteIp := p.remoteIp()
+ if remoteIp == nil {
+ if p.bannableAddr.Ok {
+ t.logger.WithDefaultLevel(log.Debug).Printf("can't get remote ip for peer %v", p)
+ }
+ return
+ }
+ netipAddr := netip.MustParseAddr(remoteIp.String())
+ if Some(netipAddr) != p.bannableAddr {
+ t.logger.WithDefaultLevel(log.Debug).Printf(
+ "peer remote ip does not match its bannable addr [peer=%v, remote ip=%v, bannable addr=%v]",
+ p, remoteIp, p.bannableAddr)
+ }
+ if _, ok := t.cl.badPeerIPs[netipAddr]; ok {
+ // Should this be a close?
+ p.drop()
+ t.logger.WithDefaultLevel(log.Debug).Printf("dropped %v for banned remote IP %v", p, netipAddr)
+ }
+ })
+}
+
func (t *Torrent) pieceHasher(index pieceIndex) {
p := t.piece(index)
- sum, copyErr := t.hashPiece(index)
+ sum, failedPeers, copyErr := t.hashPiece(index)
correct := sum == *p.hash
switch copyErr {
case nil, io.EOF:
t.storageLock.RUnlock()
t.cl.lock()
defer t.cl.unlock()
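+ // The piece hashed correctly, so peers recorded as sending blocks that differ from it sent bad data: ban them, drop conns from banned IPs, and forget the piece's cached block hashes.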
+ if correct {
+ for peer := range failedPeers {
+ t.cl.banPeerIP(peer.AsSlice())
+ t.logger.WithDefaultLevel(log.Debug).Printf("smart banned %v for piece %v", peer, index)
+ }
+ t.dropBannedPeers()
+ for ri := t.pieceRequestIndexOffset(index); ri < t.pieceRequestIndexOffset(index+1); ri++ {
+ t.smartBanCache.ForgetBlock(ri)
+ }
+ }
p.hashing = false
t.pieceHashed(index, correct, copyErr)
t.updatePiecePriority(index, "Torrent.pieceHasher")
defer t.cl.unlock()
t.dataUploadDisallowed = true
for c := range t.conns {
+ // TODO: This doesn't look right. Shouldn't we tickle writers to choke peers or something instead?
c.updateRequests("disallow data upload")
}
}
return &t.cl.config.Callbacks
}
-func (t *Torrent) addWebSeed(url string) {
+type AddWebSeedsOpt func(*webseed.Client)
+
+// Sets the WebSeed trailing path escaper for a webseed.Client.
+func WebSeedPathEscaper(custom webseed.PathEscaper) AddWebSeedsOpt {
+ return func(c *webseed.Client) {
+ c.PathEscaper = custom
+ }
+}
+
+func (t *Torrent) AddWebSeeds(urls []string, opts ...AddWebSeedsOpt) {
+ t.cl.lock()
+ defer t.cl.unlock()
+ for _, u := range urls {
+ t.addWebSeed(u, opts...)
+ }
+}
+
+func (t *Torrent) addWebSeed(url string, opts ...AddWebSeedsOpt) {
if t.cl.config.DisableWebseeds {
return
}
if _, ok := t.webSeeds[url]; ok {
return
}
- // I don't think Go http supports pipelining requests. However we can have more ready to go
+ // I don't think Go http supports pipelining requests. However, we can have more ready to go
// right away. This value should be some multiple of the number of connections to a host. I
- // would expect that double maxRequests plus a bit would be appropriate.
- const maxRequests = 32
+ // would expect that double maxRequests plus a bit would be appropriate. This value is based on
+ // downloading Sintel (08ada5a7a6183aae1e09d831df6748d566095a10) from
+ // "https://webtorrent.io/torrents/".
+ const maxRequests = 16
ws := webseedPeer{
peer: Peer{
t: t,
// requests mark more often, so recomputation is probably sooner than with regular peer
// conns. ~4x maxRequests would be about right.
PeerMaxRequests: 128,
- RemoteAddr: remoteAddrFromUrl(url),
- callbacks: t.callbacks(),
+ // TODO: Set ban prefix?
+ RemoteAddr: remoteAddrFromUrl(url),
+ callbacks: t.callbacks(),
},
client: webseed.Client{
- HttpClient: t.cl.webseedHttpClient,
+ HttpClient: t.cl.httpClient,
Url: url,
+ ResponseBodyWrapper: func(r io.Reader) io.Reader {
+ return &rateLimitedReader{
+ l: t.cl.config.DownloadRateLimiter,
+ r: r,
+ }
+ },
},
activeRequests: make(map[Request]webseed.Request, maxRequests),
maxRequests: maxRequests,
}
+ ws.peer.initRequestState()
+ for _, opt := range opts {
+ opt(&ws.client)
+ }
ws.peer.initUpdateRequestsTimer()
ws.requesterCond.L = t.cl.locker()
for i := 0; i < maxRequests; i += 1 {
- go ws.requester()
+ go ws.requester(i)
}
for _, f := range t.callbacks().NewPeer {
f(&ws.peer)
}
func (t *Torrent) requestIndexToRequest(ri RequestIndex) Request {
- index := ri / t.chunksPerRegularPiece()
+ index := t.pieceIndexOfRequestIndex(ri)
return Request{
pp.Integer(index),
- t.piece(int(index)).chunkIndexSpec(ri % t.chunksPerRegularPiece()),
+ t.piece(index).chunkIndexSpec(ri % t.chunksPerRegularPiece()),
}
}
func (t *Torrent) requestIndexFromRequest(r Request) RequestIndex {
- return t.pieceRequestIndexOffset(pieceIndex(r.Index)) + uint32(r.Begin/t.chunkSize)
+ return t.pieceRequestIndexOffset(pieceIndex(r.Index)) + RequestIndex(r.Begin/t.chunkSize)
}
func (t *Torrent) pieceRequestIndexOffset(piece pieceIndex) RequestIndex {
func (t *Torrent) updateComplete() {
t.Complete.SetBool(t.haveAllPieces())
}
+
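+ // Cancels the outstanding request for the given chunk, if any, and returns the peer it was assigned to, or nil.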
+func (t *Torrent) cancelRequest(r RequestIndex) *Peer {
+ p := t.requestingPeer(r)
+ if p != nil {
+ p.cancel(r)
+ }
+ // TODO: This is a check that an old invariant holds. It can be removed after some testing.
+ //delete(t.pendingRequests, r)
+ if _, ok := t.requestState[r]; ok {
+ panic("expected request state to be gone")
+ }
+ return p
+}
+
+func (t *Torrent) requestingPeer(r RequestIndex) *Peer {
+ return t.requestState[r].peer
+}
+
+func (t *Torrent) addConnWithAllPieces(p *Peer) {
+ if t.connsWithAllPieces == nil {
+ t.connsWithAllPieces = make(map[*Peer]struct{}, t.maxEstablishedConns)
+ }
+ t.connsWithAllPieces[p] = struct{}{}
+}
+
+func (t *Torrent) deleteConnWithAllPieces(p *Peer) bool {
+ _, ok := t.connsWithAllPieces[p]
+ delete(t.connsWithAllPieces, p)
+ return ok
+}
+
+func (t *Torrent) numActivePeers() int {
+ return len(t.conns) + len(t.webSeeds)
+}
+
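+ // Whether the storage implementation reports a capacity limit.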
+func (t *Torrent) hasStorageCap() bool {
+ f := t.storage.Capacity
+ if f == nil {
+ return false
+ }
+ _, ok := (*f)()
+ return ok
+}
+
+func (t *Torrent) pieceIndexOfRequestIndex(ri RequestIndex) pieceIndex {
+ return pieceIndex(ri / t.chunksPerRegularPiece())
+}
+
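+ // Calls f for each request index in the piece whose chunk hasn't been written (isn't dirty). reuseIter avoids allocating a fresh bitmap iterator on every call.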
+func (t *Torrent) iterUndirtiedRequestIndexesInPiece(
+ reuseIter *typedRoaring.Iterator[RequestIndex],
+ piece pieceIndex,
+ f func(RequestIndex),
+) {
+ reuseIter.Initialize(&t.dirtyChunks)
+ pieceRequestIndexOffset := t.pieceRequestIndexOffset(piece)
+ iterBitmapUnsetInRange(
+ reuseIter,
+ pieceRequestIndexOffset, pieceRequestIndexOffset+t.pieceNumChunks(piece),
+ f,
+ )
+}
+
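+ // Tracks which peer holds an outstanding chunk request and when it was assigned.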
+type requestState struct {
+ peer *Peer
+ when time.Time
+}