PiecePriorityNow // A Reader is reading in this piece.
)
-type piece struct {
+type Piece struct {
// The completed piece SHA1 hash, from the metainfo "pieces" field.
- Hash metainfo.Hash
+ hash metainfo.Hash
t *Torrent
index int
// Chunks we've written to since the last check. The chunk offset and
// length can be determined by the request chunkSize in use.
- DirtyChunks bitmap.Bitmap
+ dirtyChunks bitmap.Bitmap
- Hashing bool
- QueuedForHash bool
- EverHashed bool
+ hashing bool
+ queuedForHash bool
+ everHashed bool
numVerifies int64
- PublicPieceState PieceState
+ publicPieceState PieceState
priority piecePriority
pendingWritesMutex sync.Mutex
noPendingWrites sync.Cond
}
-func (p *piece) Info() metainfo.Piece {
+func (p *Piece) Info() metainfo.Piece {
return p.t.info.Piece(p.index)
}
-func (p *piece) Storage() storage.Piece {
+func (p *Piece) Storage() storage.Piece {
return p.t.storage.Piece(p.Info())
}
-func (p *piece) pendingChunkIndex(chunkIndex int) bool {
- return !p.DirtyChunks.Contains(chunkIndex)
+func (p *Piece) pendingChunkIndex(chunkIndex int) bool {
+ return !p.dirtyChunks.Contains(chunkIndex)
}
-func (p *piece) pendingChunk(cs chunkSpec, chunkSize pp.Integer) bool {
+func (p *Piece) pendingChunk(cs chunkSpec, chunkSize pp.Integer) bool {
return p.pendingChunkIndex(chunkIndex(cs, chunkSize))
}
-func (p *piece) hasDirtyChunks() bool {
- return p.DirtyChunks.Len() != 0
+func (p *Piece) hasDirtyChunks() bool {
+ return p.dirtyChunks.Len() != 0
}
-func (p *piece) numDirtyChunks() (ret int) {
- return p.DirtyChunks.Len()
+func (p *Piece) numDirtyChunks() (ret int) {
+ return p.dirtyChunks.Len()
}
-func (p *piece) unpendChunkIndex(i int) {
- p.DirtyChunks.Add(i)
+func (p *Piece) unpendChunkIndex(i int) {
+ p.dirtyChunks.Add(i)
}
-func (p *piece) pendChunkIndex(i int) {
- p.DirtyChunks.Remove(i)
+func (p *Piece) pendChunkIndex(i int) {
+ p.dirtyChunks.Remove(i)
}
-func (p *piece) numChunks() int {
+func (p *Piece) numChunks() int {
return p.t.pieceNumChunks(p.index)
}
-func (p *piece) undirtiedChunkIndices() (ret bitmap.Bitmap) {
- ret = p.DirtyChunks.Copy()
+func (p *Piece) undirtiedChunkIndices() (ret bitmap.Bitmap) {
+ ret = p.dirtyChunks.Copy()
ret.FlipRange(0, p.numChunks())
return
}
-func (p *piece) incrementPendingWrites() {
+func (p *Piece) incrementPendingWrites() {
p.pendingWritesMutex.Lock()
p.pendingWrites++
p.pendingWritesMutex.Unlock()
}
-func (p *piece) decrementPendingWrites() {
+func (p *Piece) decrementPendingWrites() {
p.pendingWritesMutex.Lock()
if p.pendingWrites == 0 {
panic("assertion")
p.pendingWritesMutex.Unlock()
}
-func (p *piece) waitNoPendingWrites() {
+func (p *Piece) waitNoPendingWrites() {
p.pendingWritesMutex.Lock()
for p.pendingWrites != 0 {
p.noPendingWrites.Wait()
p.pendingWritesMutex.Unlock()
}
-func (p *piece) chunkIndexDirty(chunk int) bool {
- return p.DirtyChunks.Contains(chunk)
+func (p *Piece) chunkIndexDirty(chunk int) bool {
+ return p.dirtyChunks.Contains(chunk)
}
-func (p *piece) chunkIndexSpec(chunk int) chunkSpec {
+func (p *Piece) chunkIndexSpec(chunk int) chunkSpec {
return chunkIndexSpec(chunk, p.length(), p.chunkSize())
}
-func (p *piece) numDirtyBytes() (ret pp.Integer) {
+func (p *Piece) numDirtyBytes() (ret pp.Integer) {
defer func() {
if ret > p.length() {
panic("too many dirty bytes")
return
}
-func (p *piece) length() pp.Integer {
+func (p *Piece) length() pp.Integer {
return p.t.pieceLength(p.index)
}
-func (p *piece) chunkSize() pp.Integer {
+func (p *Piece) chunkSize() pp.Integer {
return p.t.chunkSize
}
-func (p *piece) lastChunkIndex() int {
+func (p *Piece) lastChunkIndex() int {
return p.numChunks() - 1
}
-func (p *piece) bytesLeft() (ret pp.Integer) {
+func (p *Piece) bytesLeft() (ret pp.Integer) {
if p.t.pieceComplete(p.index) {
return 0
}
return p.length() - p.numDirtyBytes()
}
-func (p *piece) VerifyData() {
+func (p *Piece) VerifyData() {
p.t.cl.mu.Lock()
defer p.t.cl.mu.Unlock()
target := p.numVerifies + 1
- if p.Hashing {
+ if p.hashing {
target++
}
p.t.queuePieceCheck(p.index)
closed missinggo.Event
infoHash metainfo.Hash
- pieces []piece
+ pieces []Piece
// Values are the piece indices that changed.
pieceStateChanges *pubsub.PubSub
// The size of chunks to request from peers over the wire. This is
// makePieces initializes t.pieces from the piece hashes in the info
// dictionary, wiring each Piece back to its torrent and index, hooking the
// no-pending-writes condition to its mutex, and copying the expected hash.
func (t *Torrent) makePieces() {
	hashes := infoPieceHashes(t.info)
	t.pieces = make([]Piece, len(hashes))
	for i, hash := range hashes {
		piece := &t.pieces[i]
		piece.t = t
		piece.index = i
		piece.noPendingWrites.L = &piece.pendingWritesMutex
		missinggo.CopyExact(piece.hash[:], hash)
	}
}
if t.pieceComplete(index) {
ret.Complete = true
}
- if p.QueuedForHash || p.Hashing {
+ if p.queuedForHash || p.hashing {
ret.Checking = true
}
if !ret.Complete && t.piecePartiallyDownloaded(index) {
}
// pendAllChunkSpecs clears the piece's dirty-chunk bitmap so every chunk
// becomes pending again.
func (t *Torrent) pendAllChunkSpecs(pieceIndex int) {
	t.pieces[pieceIndex].dirtyChunks.Clear()
}
type Peer struct {
return false
}
p := &t.pieces[index]
- if p.QueuedForHash {
+ if p.queuedForHash {
return false
}
- if p.Hashing {
+ if p.hashing {
return false
}
if t.pieceComplete(index) {
func (t *Torrent) publishPieceChange(piece int) {
cur := t.pieceState(piece)
p := &t.pieces[piece]
- if cur != p.PublicPieceState {
- p.PublicPieceState = cur
+ if cur != p.publicPieceState {
+ p.publicPieceState = cur
t.pieceStateChanges.Publish(PieceStateChange{
piece,
cur,
}
func (t *Torrent) pieceAllDirty(piece int) bool {
- return t.pieces[piece].DirtyChunks.Len() == t.pieceNumChunks(piece)
+ return t.pieces[piece].dirtyChunks.Len() == t.pieceNumChunks(piece)
}
func (t *Torrent) readersChanged() {
}
p := &t.pieces[piece]
touchers := t.reapPieceTouchers(piece)
- if p.EverHashed {
+ if p.everHashed {
// Don't score the first time a piece is hashed, it could be an
// initial check.
if correct {
pieceHashedCorrect.Add(1)
} else {
- log.Printf("%s: piece %d (%s) failed hash: %d connections contributed", t, piece, p.Hash, len(touchers))
+ log.Printf("%s: piece %d (%s) failed hash: %d connections contributed", t, piece, p.hash, len(touchers))
pieceHashedNotCorrect.Add(1)
}
}
- p.EverHashed = true
+ p.everHashed = true
if correct {
for _, c := range touchers {
c.goodPiecesDirtied++
cl.mu.Lock()
defer cl.mu.Unlock()
p := &t.pieces[piece]
- for p.Hashing || t.storage == nil {
+ for p.hashing || t.storage == nil {
cl.event.Wait()
}
- p.QueuedForHash = false
+ p.queuedForHash = false
if t.closed.IsSet() || t.pieceComplete(piece) {
t.updatePiecePriority(piece)
return
}
- p.Hashing = true
+ p.hashing = true
t.publishPieceChange(piece)
cl.mu.Unlock()
sum := t.hashPiece(piece)
cl.mu.Lock()
p.numVerifies++
- p.Hashing = false
- t.pieceHashed(piece, sum == p.Hash)
+ p.hashing = false
+ t.pieceHashed(piece, sum == p.hash)
}
// Return the connections that touched a piece, and clear the entry while
// Currently doesn't really queue, but should in the future.
func (t *Torrent) queuePieceCheck(pieceIndex int) {
piece := &t.pieces[pieceIndex]
- if piece.QueuedForHash {
+ if piece.queuedForHash {
return
}
- piece.QueuedForHash = true
+ piece.queuedForHash = true
t.publishPieceChange(pieceIndex)
go t.verifyPiece(pieceIndex)
}