c.PeerPieces = append(c.PeerPieces, false)
}
c.PeerPieces[piece] = true
- if t.wantPiece(piece) {
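+ // Consider new requests whenever the peer announces a piece we lack.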
+ if !t.havePiece(piece) {
me.replenishConnRequests(t, c)
}
}
Index: pp.Integer(piece),
})
// TODO: Cancel requests for this piece.
- } else {
- if conn.PeerHasPiece(piece) {
- me.replenishConnRequests(t, conn)
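+ // Assert that no requests for this piece are still outstanding.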
+ for r := range conn.Requests {
+ if r.Index == piece {
+ panic("wat")
+ }
}
}
+ // Do this even if the piece is correct because new first-hashings may
+ // need to be scheduled.
+ if conn.PeerHasPiece(piece) {
+ me.replenishConnRequests(t, conn)
+ }
}
if t.haveAllPieces() && me.noUpload {
t.CeaseNetworking()
if me.c.RequestPending(req) {
return true
}
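+ // Nothing to do for chunks the torrent no longer wants.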
+ if !me.t.wantChunk(req) {
+ return true
+ }
again := me.c.Request(req)
if me.c.RequestPending(req) {
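+ // Record the pending request so other connections avoid duplicating it.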
me.s.requestHeat[me.t][req]++
// Fill priority requests.
func (me *requestFiller) priorities() bool {
for req := range me.s.priorities[me.t] {
+ // TODO: Perhaps this filter should be applied to every request?
if _, ok := me.t.Pieces[req.Index].PendingChunkSpecs[req.chunkSpec]; !ok {
panic(req)
}
func (me *requestFiller) completePartial() bool {
t := me.t
th := me.s.requestHeat[t]
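+ // Look up the last read offset once; it's needed in two places below.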
+ lro, lroOk := me.s.lastReadOffset[t]
for e := t.IncompletePiecesByBytesLeft.Front(); e != nil; e = e.Next() {
p := e.Value.(int)
// Stop when we reach pieces that aren't partial and aren't smaller
// than the usual piece size.
if !t.PiecePartiallyDownloaded(p) && int(t.PieceLength(pp.Integer(p))) == t.UsualPieceSize() {
break
}
+ // Skip pieces that are entirely inside the readahead zone.
+ if lroOk {
+ pieceOff := int64(p) * int64(t.UsualPieceSize())
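+ // PieceLength, not UsualPieceSize: the final piece may be shorter.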
+ pieceEndOff := pieceOff + int64(t.PieceLength(pp.Integer(p)))
+ if pieceOff >= lro && pieceEndOff <= lro+me.s.Readahead {
+ continue
+ }
+ }
for chunkSpec := range t.Pieces[p].PendingChunkSpecs {
r := request{pp.Integer(p), chunkSpec}
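+ // Skip chunks that are already being requested on another connection.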
if th[r] >= 1 {
continue
}
- if lastReadOffset, ok := me.s.lastReadOffset[t]; ok {
+ if lroOk {
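+ // Leave chunks inside the readahead window for the readahead pass.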
off := me.t.requestOffset(r)
- if off >= lastReadOffset && off < lastReadOffset+me.s.Readahead {
+ if off >= lro && off < lro+me.s.Readahead {
continue
}
}
if len(rr) == 0 {
return true
}
- // Produce a partially sorted random permutation into the readahead chunks to somewhat preserve order but reducing wasted chunks due to overlap with other peers.
+ // Produce a partially sorted random permutation of indices into the
+ // readahead chunks, to somewhat preserve order while reducing wasted
+ // chunks due to overlap with other peers.
ii := new(intHeap)
*ii = me.s.rand.Perm(len(rr))
heap.Init(ii)
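+ // heap.Init imposes just the heap invariant, yielding a partially
+ // sorted order rather than a full sort.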
// Lose the length of this block.
_len -= int64(req.Length)
off = reqOff + int64(req.Length)
- if t.wantChunk(req) {
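+ // Prioritize every chunk in this range that we don't already have.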
+ if !t.haveChunk(req) {
s.priorities[t][req] = struct{}{}
}
}