// Currently doesn't really queue, but should in the future.
func (cl *Client) queuePieceCheck(t *torrent, pieceIndex int) {
- piece := &t.Pieces[pieceIndex]
+ piece := &t.pieces[pieceIndex]
if piece.QueuedForHash {
return
}
// Queue a piece check if one isn't already queued, and the piece has never
// been checked before.
func (cl *Client) queueFirstHash(t *torrent, piece int) {
- p := &t.Pieces[piece]
+ p := &t.pieces[piece]
if p.EverHashed || p.Hashing || p.QueuedForHash || t.pieceComplete(piece) {
return
}
log.Printf("outbound connect to %s blocked by IP blocklist rule %s", peer.IP, r)
return
}
- t.HalfOpen[addr] = struct{}{}
+ t.halfOpen[addr] = struct{}{}
go me.outgoingConnection(t, addr, peer.Source)
}
func (me *Client) dialTimeout(t *torrent) time.Duration {
me.mu.Lock()
- pendingPeers := len(t.Peers)
+ pendingPeers := len(t.peers)
me.mu.Unlock()
return reducedDialTimeout(nominalDialTimeout, me.halfOpenLimit, pendingPeers)
}
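// reducedDialTimeout isn't shown in this diff. A minimal sketch of what the
// call above assumes: scale the nominal timeout down as the pending-peer
// backlog grows, so a large reserve doesn't tie up dialers for too long.
// minDialTimeout is a package constant assumed here, not shown in the diff.
func reducedDialTimeout(max time.Duration, halfOpenLimit int, pendingPeers int) (ret time.Duration) {
	// Each halfOpenLimit's worth of pending peers divides the nominal
	// timeout once more, with a floor so dials still have a chance to finish.
	ret = max / time.Duration((pendingPeers+halfOpenLimit)/halfOpenLimit)
	if ret < minDialTimeout {
		ret = minDialTimeout
	}
	return
}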
}
func (me *Client) noLongerHalfOpen(t *torrent, addr string) {
- if _, ok := t.HalfOpen[addr]; !ok {
+ if _, ok := t.halfOpen[addr]; !ok {
panic("invariant broken")
}
- delete(t.HalfOpen, addr)
+ delete(t.halfOpen, addr)
me.openNewConns(t)
}
func (cl *Client) completedMetadata(t *torrent) {
h := sha1.New()
- h.Write(t.MetaData)
+ h.Write(t.metadataBytes)
var ih metainfo.InfoHash
missinggo.CopyExact(&ih, h.Sum(nil))
if ih != t.InfoHash {
return
}
var info metainfo.Info
- err := bencode.Unmarshal(t.MetaData, &info)
+ err := bencode.Unmarshal(t.metadataBytes, &info)
if err != nil {
log.Printf("error unmarshalling metadata: %s", err)
t.invalidateMetadata()
}
// TODO(anacrolix): If this fails, I think something harsher should be
// done.
- err = cl.setMetaData(t, &info, t.MetaData)
+ err = cl.setMetaData(t, &info, t.metadataBytes)
if err != nil {
log.Printf("error setting metadata: %s", err)
t.invalidateMetadata()
break
}
start := (1 << 14) * piece
- c.Post(t.newMetadataExtensionMessage(c, pp.DataMetadataExtensionMsgType, piece, t.MetaData[start:start+t.metadataPieceSize(piece)]))
+ c.Post(t.newMetadataExtensionMessage(c, pp.DataMetadataExtensionMsgType, piece, t.metadataBytes[start:start+t.metadataPieceSize(piece)]))
case pp.RejectMetadataExtensionMsgType:
default:
err = errors.New("unknown msg_type value")
func (me *Client) sendChunk(t *torrent, c *connection, r request) error {
// Count the chunk being sent, even if it isn't.
b := make([]byte, r.Length)
- p := t.Info.Piece(int(r.Index))
+ p := t.info.Piece(int(r.Index))
n, err := t.readAt(b, p.Offset()+int64(r.Begin))
if n != len(b) {
if err == nil {
- // Returns true if connection is removed from torrent.Conns.
+ // Returns true if the connection is removed from torrent.conns.
func (me *Client) deleteConnection(t *torrent, c *connection) bool {
- for i0, _c := range t.Conns {
+ for i0, _c := range t.conns {
if _c != c {
continue
}
- i1 := len(t.Conns) - 1
+ i1 := len(t.conns) - 1
if i0 != i1 {
- t.Conns[i0] = t.Conns[i1]
+ t.conns[i0] = t.conns[i1]
}
- t.Conns = t.Conns[:i1]
+ t.conns = t.conns[:i1]
return true
}
return false
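// The loop above deletes with the unordered swap-remove idiom: copy the last
// element over the one being removed, then truncate. O(1) per removal, at the
// cost of connection order, which deleteConnection doesn't rely on:
//
//	t.conns[i0] = t.conns[i1] // i1 == len(t.conns)-1
//	t.conns = t.conns[:i1]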
if !me.wantConns(t) {
return false
}
- for _, c0 := range t.Conns {
+ for _, c0 := range t.conns {
if c.PeerID == c0.PeerID {
// Already connected to a client with that ID.
duplicateClientConns.Add(1)
return false
}
}
- if len(t.Conns) >= socketsPerTorrent {
+ if len(t.conns) >= socketsPerTorrent {
c := t.worstBadConn(me)
if c == nil {
return false
c.Close()
me.deleteConnection(t, c)
}
- if len(t.Conns) >= socketsPerTorrent {
- panic(len(t.Conns))
+ if len(t.conns) >= socketsPerTorrent {
+ panic(len(t.conns))
}
- t.Conns = append(t.Conns, c)
+ t.conns = append(t.conns, c)
c.t = t
return true
}
if !me.seeding(t) && !t.needData() {
return false
}
- if len(t.Conns) < socketsPerTorrent {
+ if len(t.conns) < socketsPerTorrent {
return true
}
return t.worstBadConn(me) != nil
return
default:
}
- for len(t.Peers) != 0 {
+ for len(t.peers) != 0 {
if !me.wantConns(t) {
return
}
- if len(t.HalfOpen) >= me.halfOpenLimit {
+ if len(t.halfOpen) >= me.halfOpenLimit {
return
}
var (
k peersKey
p Peer
)
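// Take an arbitrary peer from the reserve: Go map iteration order is
// unspecified, so ranging and breaking on the first element picks one at random.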
- for k, p = range t.Peers {
+ for k, p = range t.peers {
break
}
- delete(t.Peers, k)
+ delete(t.peers, k)
me.initiateConn(p, t)
}
t.wantPeers.Broadcast()
t = &torrent{
InfoHash: ih,
chunkSize: defaultChunkSize,
- Peers: make(map[peersKey]Peer),
+ peers: make(map[peersKey]Peer),
closing: make(chan struct{}),
ceasingNetworking: make(chan struct{}),
gotMetainfo: make(chan struct{}),
- HalfOpen: make(map[string]struct{}),
+ halfOpen: make(map[string]struct{}),
pieceStateChanges: pubsub.NewPubSub(),
}
return
}
func (t *torrent) addTrackers(announceList [][]string) {
- newTrackers := copyTrackers(t.Trackers)
+ newTrackers := copyTrackers(t.trackers)
for tierIndex, tier := range announceList {
if tierIndex < len(newTrackers) {
newTrackers[tierIndex] = mergeTier(newTrackers[tierIndex], tier)
}
shuffleTier(newTrackers[tierIndex])
}
- t.Trackers = newTrackers
+ t.trackers = newTrackers
}
// Don't call this before the info is available.
if !t.haveInfo() {
return 0
}
- return t.Info.TotalLength() - t.bytesLeft()
+ return t.info.TotalLength() - t.bytesLeft()
}
// A file-like handle to some torrent data resource.
}
// Try to merge in info we have on the torrent. Any err left will
// terminate the function.
- if t.Info == nil {
+ if t.info == nil {
if spec.Info != nil {
err = cl.setMetaData(t, &spec.Info.Info, spec.Info.Bytes)
} else {
return false
default:
}
- if len(t.Peers) > torrentPeersLowWater {
+ if len(t.peers) > torrentPeersLowWater {
goto wait
}
if t.needData() || cl.seeding(t) {
}
cl.mu.Lock()
cl.addPeers(t, addPeers)
- numPeers := len(t.Peers)
+ numPeers := len(t.peers)
cl.mu.Unlock()
if numPeers >= torrentPeersHighWater {
break getPeers
}
cl.mu.RLock()
req.Left = t.bytesLeftAnnounce()
- trackers := t.Trackers
+ trackers := t.trackers
cl.mu.RUnlock()
if cl.announceTorrentTrackersFastStart(&req, trackers, t) {
req.Event = tracker.None
for cl.waitWantPeers(t) {
cl.mu.RLock()
req.Left = t.bytesLeftAnnounce()
- trackers = t.Trackers
+ trackers = t.trackers
cl.mu.RUnlock()
numTrackersTried := 0
for _, tier := range trackers {
}
index := int(req.Index)
- piece := &t.Pieces[index]
+ piece := &t.pieces[index]
// Do we actually want this chunk?
if !t.wantChunk(req) {
piece.unpendChunkIndex(chunkIndex(req.chunkSpec, t.chunkSize))
// Cancel pending requests for this chunk.
- for _, c := range t.Conns {
+ for _, c := range t.conns {
if me.connCancel(t, c, req) {
c.updateRequests()
}
// Return the connections that touched a piece, and clear the entry while
// doing it.
func (me *Client) reapPieceTouches(t *torrent, piece int) (ret []*connection) {
- for _, c := range t.Conns {
+ for _, c := range t.conns {
if _, ok := c.peerTouchedPieces[piece]; ok {
ret = append(ret, c)
delete(c.peerTouchedPieces, piece)
}
func (me *Client) pieceHashed(t *torrent, piece int, correct bool) {
- p := &t.Pieces[piece]
+ p := &t.pieces[piece]
if p.EverHashed {
- // Don't score the first time a piece is hashed, it could be an
- // initial check.
+ // Don't score the first time a piece is hashed; it could be an
+ // initial check.
func (me *Client) onCompletedPiece(t *torrent, piece int) {
t.pendingPieces.Remove(piece)
t.pendAllChunkSpecs(piece)
- for _, conn := range t.Conns {
+ for _, conn := range t.conns {
conn.Have(piece)
for r := range conn.Requests {
if int(r.Index) == piece {
return
}
me.openNewConns(t)
- for _, conn := range t.Conns {
+ for _, conn := range t.conns {
if conn.PeerHasPiece(piece) {
conn.updateRequests()
}
func (cl *Client) verifyPiece(t *torrent, piece int) {
cl.mu.Lock()
defer cl.mu.Unlock()
- p := &t.Pieces[piece]
+ p := &t.pieces[piece]
for p.Hashing || t.storage == nil {
cl.event.Wait()
}
ceasingNetworking chan struct{}
InfoHash metainfo.InfoHash
- Pieces []piece
+ pieces []piece
// Values are the piece indices that changed.
pieceStateChanges *pubsub.PubSub
chunkSize pp.Integer
storage storage.Torrent
// The info dict. Nil if we don't have it (yet).
- Info *metainfo.InfoEx
+ info *metainfo.InfoEx
// Active peer connections, running message stream loops.
- Conns []*connection
+ conns []*connection
// Set of addrs to which we're attempting to connect. Connections are
// half-open until all handshakes are completed.
- HalfOpen map[string]struct{}
+ halfOpen map[string]struct{}
// Reserve of peers to connect to. A peer can be both here and in the
- // active connections if were told about the peer after connecting with
+ // active connections if we were told about the peer after connecting with
// them. That encourages us to reconnect to peers that are well known.
- Peers map[peersKey]Peer
+ peers map[peersKey]Peer
wantPeers sync.Cond
// BEP 12 Multitracker Metadata Extension. The tracker.Client instances
// mirror their respective URLs from the announce-list metainfo key.
- Trackers []trackerTier
+ trackers []trackerTier
// Name used if the info name isn't available.
displayName string
// The bencoded bytes of the info dict.
- MetaData []byte
+ metadataBytes []byte
- // Each element corresponds to the 16KiB metadata pieces. If true, we have
- // received that piece.
+ // Each element corresponds to a 16 KiB metadata piece. If true, we have
+ // received that piece.
- metadataHave []bool
+ metadataCompletedChunks []bool
- // Closed when .Info is set.
+ // Closed when .info is set.
gotMetainfo chan struct{}
}
func (t *torrent) pieceCompleteUncached(piece int) bool {
- return t.Pieces[piece].Storage().GetIsComplete()
+ return t.pieces[piece].Storage().GetIsComplete()
}
func (t *torrent) numConnsUnchoked() (num int) {
- for _, c := range t.Conns {
+ for _, c := range t.conns {
if !c.PeerChoked {
num++
}
// There's a connection to that address already.
func (t *torrent) addrActive(addr string) bool {
- if _, ok := t.HalfOpen[addr]; ok {
+ if _, ok := t.halfOpen[addr]; ok {
return true
}
- for _, c := range t.Conns {
+ for _, c := range t.conns {
if c.remoteAddr().String() == addr {
return true
}
func (t *torrent) worstConns(cl *Client) (wcs *worstConns) {
wcs = &worstConns{
- c: make([]*connection, 0, len(t.Conns)),
+ c: make([]*connection, 0, len(t.conns)),
t: t,
cl: cl,
}
- for _, c := range t.Conns {
+ for _, c := range t.conns {
if !c.closed.IsSet() {
wcs.c = append(wcs.c, c)
}
default:
}
close(t.ceasingNetworking)
- for _, c := range t.Conns {
+ for _, c := range t.conns {
c.Close()
}
}
func (t *torrent) addPeer(p Peer, cl *Client) {
cl.openNewConns(t)
- if len(t.Peers) >= torrentPeersHighWater {
+ if len(t.peers) >= torrentPeersHighWater {
return
}
key := peersKey{string(p.IP), p.Port}
- if _, ok := t.Peers[key]; ok {
+ if _, ok := t.peers[key]; ok {
return
}
- t.Peers[key] = p
+ t.peers[key] = p
peersAddedBySource.Add(string(p.Source), 1)
cl.openNewConns(t)
}
func (t *torrent) invalidateMetadata() {
- t.MetaData = nil
- t.metadataHave = nil
- t.Info = nil
+ t.metadataBytes = nil
+ t.metadataCompletedChunks = nil
+ t.info = nil
}
func (t *torrent) saveMetadataPiece(index int, data []byte) {
if t.haveInfo() {
return
}
- if index >= len(t.metadataHave) {
+ if index >= len(t.metadataCompletedChunks) {
log.Printf("%s: ignoring metadata piece %d", t, index)
return
}
- copy(t.MetaData[(1<<14)*index:], data)
- t.metadataHave[index] = true
+ copy(t.metadataBytes[(1<<14)*index:], data)
+ t.metadataCompletedChunks[index] = true
}
func (t *torrent) metadataPieceCount() int {
- return (len(t.MetaData) + (1 << 14) - 1) / (1 << 14)
+ return (len(t.metadataBytes) + (1 << 14) - 1) / (1 << 14)
}
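// Metadata travels in 16 KiB (1<<14 byte) pieces (BEP 9), and the expression
// above is a ceiling division. For example, a 50000-byte info dict spans
// (50000 + 16383) / 16384 = 4 pieces, the last holding 50000 - 3*16384 = 848
// bytes (see metadataPieceSize below).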
func (t *torrent) haveMetadataPiece(piece int) bool {
if t.haveInfo() {
- return (1<<14)*piece < len(t.MetaData)
+ return (1<<14)*piece < len(t.metadataBytes)
} else {
- return piece < len(t.metadataHave) && t.metadataHave[piece]
+ return piece < len(t.metadataCompletedChunks) && t.metadataCompletedChunks[piece]
}
}
func (t *torrent) metadataSizeKnown() bool {
- return t.MetaData != nil
+ return t.metadataBytes != nil
}
func (t *torrent) metadataSize() int {
- return len(t.MetaData)
+ return len(t.metadataBytes)
}
func infoPieceHashes(info *metainfo.Info) (ret []string) {
err = fmt.Errorf("bad info: %s", err)
return
}
- t.Info = &metainfo.InfoEx{
+ t.info = &metainfo.InfoEx{
Info: *md,
Bytes: infoBytes,
Hash: &t.InfoHash,
}
- t.storage, err = t.storageOpener.OpenTorrent(t.Info)
+ t.storage, err = t.storageOpener.OpenTorrent(t.info)
if err != nil {
return
}
t.length = 0
- for _, f := range t.Info.UpvertedFiles() {
+ for _, f := range t.info.UpvertedFiles() {
t.length += f.Length
}
- t.MetaData = infoBytes
- t.metadataHave = nil
+ t.metadataBytes = infoBytes
+ t.metadataCompletedChunks = nil
hashes := infoPieceHashes(md)
- t.Pieces = make([]piece, len(hashes))
+ t.pieces = make([]piece, len(hashes))
for i, hash := range hashes {
- piece := &t.Pieces[i]
+ piece := &t.pieces[i]
piece.t = t
piece.index = i
piece.noPendingWrites.L = &piece.pendingWritesMutex
missinggo.CopyExact(piece.Hash[:], hash)
}
- for _, conn := range t.Conns {
+ for _, conn := range t.conns {
if err := conn.setNumPieces(t.numPieces()); err != nil {
log.Printf("closing connection: %s", err)
conn.Close()
}
}
- for i := range t.Pieces {
+ for i := range t.pieces {
t.updatePieceCompletion(i)
- t.Pieces[i].QueuedForHash = true
+ t.pieces[i].QueuedForHash = true
}
go func() {
- for i := range t.Pieces {
+ for i := range t.pieces {
t.verifyPiece(i)
}
}()
if t.haveInfo() {
return true
}
- if t.metadataHave == nil {
+ if t.metadataCompletedChunks == nil {
return false
}
- for _, have := range t.metadataHave {
+ for _, have := range t.metadataCompletedChunks {
if !have {
return false
}
log.Printf("received bad metadata size: %d", bytes)
return
}
- if t.MetaData != nil && len(t.MetaData) == int(bytes) {
+ if t.metadataBytes != nil && len(t.metadataBytes) == int(bytes) {
return
}
- t.MetaData = make([]byte, bytes)
- t.metadataHave = make([]bool, (bytes+(1<<14)-1)/(1<<14))
- for _, c := range t.Conns {
+ t.metadataBytes = make([]byte, bytes)
+ t.metadataCompletedChunks = make([]bool, (bytes+(1<<14)-1)/(1<<14))
+ for _, c := range t.conns {
cl.requestPendingMetadata(t, c)
}
- // or a display name given such as by the dn value in a magnet link, or "".
+ // or a display name, such as one given by the dn value in a magnet link, or "".
func (t *torrent) Name() string {
if t.haveInfo() {
- return t.Info.Name
+ return t.info.Name
}
return t.displayName
}
func (t *torrent) pieceState(index int) (ret PieceState) {
- p := &t.Pieces[index]
+ p := &t.pieces[index]
ret.Priority = t.piecePriority(index)
if t.pieceComplete(index) {
ret.Complete = true
}
func (t *torrent) metadataPieceSize(piece int) int {
- return metadataPieceSize(len(t.MetaData), piece)
+ return metadataPieceSize(len(t.metadataBytes), piece)
}
func (t *torrent) newMetadataExtensionMessage(c *connection, msgType int, piece int, data []byte) pp.Message {
"piece": piece,
}
if data != nil {
- d["total_size"] = len(t.MetaData)
+ d["total_size"] = len(t.metadataBytes)
}
p, err := bencode.Marshal(d)
if err != nil {
Length: int(count),
})
})
- for index := range t.Pieces {
+ for index := range t.pieces {
rle.Append(t.pieceState(index), 1)
}
rle.Flush()
fmt.Fprintf(w, "Metadata length: %d\n", t.metadataSize())
if !t.haveInfo() {
fmt.Fprintf(w, "Metadata have: ")
- for _, h := range t.metadataHave {
+ for _, h := range t.metadataCompletedChunks {
fmt.Fprintf(w, "%c", func() rune {
if h {
return 'H'
})
fmt.Fprintln(w)
fmt.Fprintf(w, "Trackers: ")
- for _, tier := range t.Trackers {
+ for _, tier := range t.trackers {
for _, tr := range tier {
fmt.Fprintf(w, "%q ", tr)
}
}
fmt.Fprintf(w, "\n")
- fmt.Fprintf(w, "Pending peers: %d\n", len(t.Peers))
- fmt.Fprintf(w, "Half open: %d\n", len(t.HalfOpen))
- fmt.Fprintf(w, "Active peers: %d\n", len(t.Conns))
+ fmt.Fprintf(w, "Pending peers: %d\n", len(t.peers))
+ fmt.Fprintf(w, "Half open: %d\n", len(t.halfOpen))
+ fmt.Fprintf(w, "Active peers: %d\n", len(t.conns))
sort.Sort(&worstConns{
- c: t.Conns,
+ c: t.conns,
t: t,
cl: cl,
})
- for i, c := range t.Conns {
+ for i, c := range t.conns {
fmt.Fprintf(w, "%2d. ", i+1)
c.WriteStatus(w, t)
}
}
func (t *torrent) haveInfo() bool {
- return t.Info != nil
+ return t.info != nil
}
// TODO: Include URIs that weren't converted to tracker clients.
func (t *torrent) announceList() (al [][]string) {
- missinggo.CastSlice(&al, t.Trackers)
+ missinggo.CastSlice(&al, t.trackers)
return
}
// Returns a run-time generated MetaInfo that includes the info bytes and
// announce-list as currently known to the client.
func (t *torrent) MetaInfo() *metainfo.MetaInfo {
- if t.MetaData == nil {
+ if t.metadataBytes == nil {
panic("info bytes not set")
}
return &metainfo.MetaInfo{
- Info: *t.Info,
+ Info: *t.info,
CreationDate: time.Now().Unix(),
Comment: "dynamic metainfo from client",
CreatedBy: "go.torrent",
func (t *torrent) bytesLeft() (left int64) {
for i := 0; i < t.numPieces(); i++ {
- left += int64(t.Pieces[i].bytesLeft())
+ left += int64(t.pieces[i].bytesLeft())
}
return
}
if t.pieceAllDirty(piece) {
return false
}
- return t.Pieces[piece].hasDirtyChunks()
+ return t.pieces[piece].hasDirtyChunks()
}
func numChunksForPiece(chunkSize int, pieceSize int) int {
}
func (t *torrent) usualPieceSize() int {
- return int(t.Info.PieceLength)
+ return int(t.info.PieceLength)
}
func (t *torrent) lastPieceSize() int {
}
func (t *torrent) numPieces() int {
- return t.Info.NumPieces()
+ return t.info.NumPieces()
}
func (t *torrent) numPiecesCompleted() (num int) {
if c, ok := t.storage.(io.Closer); ok {
c.Close()
}
- for _, conn := range t.Conns {
+ for _, conn := range t.conns {
conn.Close()
}
t.pieceStateChanges.Close()
// Return the request that would include the given offset into the torrent
// data. Returns !ok if there is no such request.
func (t *torrent) offsetRequest(off int64) (req request, ok bool) {
- return torrentOffsetRequest(t.length, t.Info.PieceLength, int64(t.chunkSize), off)
+ return torrentOffsetRequest(t.length, t.info.PieceLength, int64(t.chunkSize), off)
}
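// torrentOffsetRequest is defined elsewhere in the package. A sketch of the
// mapping it is assumed to perform, under the types shown in this diff:
// locate the piece containing the offset, align Begin down to a chunkSize
// boundary, and clamp Length to both the piece and the torrent.
func torrentOffsetRequest(torrentLength, pieceSize, chunkSize, offset int64) (r request, ok bool) {
	if offset < 0 || offset >= torrentLength {
		return
	}
	r.Index = pp.Integer(offset / pieceSize)
	r.Begin = pp.Integer(offset % pieceSize / chunkSize * chunkSize)
	r.Length = pp.Integer(chunkSize)
	// Clamp the chunk to the end of its piece...
	if pieceLeft := pp.Integer(pieceSize - int64(r.Begin)); r.Length > pieceLeft {
		r.Length = pieceLeft
	}
	// ...and to the end of the torrent data, for a short final piece.
	if torrentLeft := torrentLength - int64(r.Index)*pieceSize - int64(r.Begin); int64(r.Length) > torrentLeft {
		r.Length = pp.Integer(torrentLeft)
	}
	ok = true
	return
}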
func (t *torrent) writeChunk(piece int, begin int64, data []byte) (err error) {
tr := perf.NewTimer()
- n, err := t.Pieces[piece].Storage().WriteAt(data, begin)
+ n, err := t.pieces[piece].Storage().WriteAt(data, begin)
if err == nil && n != len(data) {
err = io.ErrShortWrite
}
}
func (t *torrent) validOutgoingRequest(r request) bool {
- if r.Index >= pp.Integer(t.Info.NumPieces()) {
+ if r.Index >= pp.Integer(t.info.NumPieces()) {
return false
}
if r.Begin%t.chunkSize != 0 {
}
func (t *torrent) pendAllChunkSpecs(pieceIndex int) {
- t.Pieces[pieceIndex].DirtyChunks.Clear()
+ t.pieces[pieceIndex].DirtyChunks.Clear()
}
type Peer struct {
}
func (t *torrent) pieceLength(piece int) (len_ pp.Integer) {
- if piece < 0 || piece >= t.Info.NumPieces() {
+ if piece < 0 || piece >= t.info.NumPieces() {
return
}
if int(piece) == t.numPieces()-1 {
- len_ = pp.Integer(t.length % t.Info.PieceLength)
+ len_ = pp.Integer(t.length % t.info.PieceLength)
}
if len_ == 0 {
- len_ = pp.Integer(t.Info.PieceLength)
+ len_ = pp.Integer(t.info.PieceLength)
}
return
}
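// Worked example for pieceLength above: with info.PieceLength = 262144
// (256 KiB) and t.length = 1000000, there are 4 pieces; the final piece gets
// 1000000 % 262144 = 213568 bytes, every earlier piece the full 262144. The
// len_ == 0 fallback also covers lengths that divide evenly, where the
// modulo for the last piece would yield 0.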
func (t *torrent) hashPiece(piece int) (ret pieceSum) {
hash := pieceHash.New()
- p := &t.Pieces[piece]
+ p := &t.pieces[piece]
p.waitNoPendingWrites()
- ip := t.Info.Piece(piece)
+ ip := t.info.Piece(piece)
pl := ip.Length()
- n, err := io.Copy(hash, io.NewSectionReader(t.Pieces[piece].Storage(), 0, pl))
+ n, err := io.Copy(hash, io.NewSectionReader(t.pieces[piece].Storage(), 0, pl))
if n == pl {
missinggo.CopyExact(&ret, hash.Sum(nil))
return
}
func (me *torrent) haveAnyPieces() bool {
- for i := range me.Pieces {
+ for i := range me.pieces {
if me.pieceComplete(i) {
return true
}
if t.pieceComplete(int(r.Index)) {
return true
}
- p := &t.Pieces[r.Index]
+ p := &t.pieces[r.Index]
return !p.pendingChunk(r.chunkSpec, t.chunkSize)
}
if !t.wantPiece(int(r.Index)) {
return false
}
- if t.Pieces[r.Index].pendingChunk(r.chunkSpec, t.chunkSize) {
+ if t.pieces[r.Index].pendingChunk(r.chunkSpec, t.chunkSize) {
return true
}
// TODO: What about pieces that were wanted, but aren't now, and aren't
if !t.haveInfo() {
return false
}
- p := &t.Pieces[index]
+ p := &t.pieces[index]
if p.QueuedForHash {
return false
}
func (t *torrent) publishPieceChange(piece int) {
cur := t.pieceState(piece)
- p := &t.Pieces[piece]
+ p := &t.pieces[piece]
if cur != p.PublicPieceState {
p.PublicPieceState = cur
t.pieceStateChanges.Publish(PieceStateChange{
if t.pieceComplete(piece) {
return 0
}
- return t.pieceNumChunks(piece) - t.Pieces[piece].numDirtyChunks()
+ return t.pieceNumChunks(piece) - t.pieces[piece].numDirtyChunks()
}
func (t *torrent) pieceAllDirty(piece int) bool {
- return t.Pieces[piece].DirtyChunks.Len() == t.pieceNumChunks(piece)
+ return t.pieces[piece].DirtyChunks.Len() == t.pieceNumChunks(piece)
}
func (t *torrent) forUrgentPieces(f func(piece int) (again bool)) (all bool) {
}
func (t *torrent) piecePriorityChanged(piece int) {
- for _, c := range t.Conns {
+ for _, c := range t.conns {
c.updatePiecePriority(piece)
}
t.maybeNewConns()
}
func (t *torrent) updatePiecePriority(piece int) bool {
- p := &t.Pieces[piece]
+ p := &t.pieces[piece]
newPrio := t.piecePriorityUncached(piece)
if newPrio == p.priority {
return false
return true
})
for i, prio := range newPrios {
- if prio != t.Pieces[i].priority {
- t.Pieces[i].priority = prio
+ if prio != t.pieces[i].priority {
+ t.pieces[i].priority = prio
t.piecePriorityChanged(i)
}
}
if size <= 0 {
return
}
- begin = int(off / t.Info.PieceLength)
- end = int((off + size + t.Info.PieceLength - 1) / t.Info.PieceLength)
- if end > t.Info.NumPieces() {
- end = t.Info.NumPieces()
+ begin = int(off / t.info.PieceLength)
+ end = int((off + size + t.info.PieceLength - 1) / t.info.PieceLength)
+ if end > t.info.NumPieces() {
+ end = t.info.NumPieces()
}
return
}
if !t.haveInfo() {
return PiecePriorityNone
}
- return t.Pieces[piece].priority
+ return t.pieces[piece].priority
}
func (t *torrent) piecePriorityUncached(piece int) (ret piecePriority) {
if !c.PeerHasPiece(piece) {
return true
}
- chunkIndices := t.Pieces[piece].undirtiedChunkIndices().ToSortedSlice()
+ chunkIndices := t.pieces[piece].undirtiedChunkIndices().ToSortedSlice()
return itertools.ForPerm(len(chunkIndices), func(i int) bool {
req := request{pp.Integer(piece), t.chunkIndexSpec(chunkIndices[i], piece)}
return c.Request(req)
func (t *torrent) pendRequest(req request) {
ci := chunkIndex(req.chunkSpec, t.chunkSize)
- t.Pieces[req.Index].pendChunkIndex(ci)
+ t.pieces[req.Index].pendChunkIndex(ci)
}
func (t *torrent) pieceChanged(piece int) {
// Non-blocking read. Client lock is not required.
func (t *torrent) readAt(b []byte, off int64) (n int, err error) {
- p := &t.Pieces[off/t.Info.PieceLength]
+ p := &t.pieces[off/t.info.PieceLength]
p.waitNoPendingWrites()
return p.Storage().ReadAt(b, off-p.Info().Offset())
}