--- /dev/null
+package merkle
+
+import (
+ "crypto/sha256"
+ "hash"
+)
+
+// NewHash returns a merkle-tree hash.Hash ready for use. The zero value of
+// Hash is not usable directly; this constructor initializes the per-block
+// SHA-256 hasher.
+func NewHash() *Hash {
+ return &Hash{
+ nextBlock: sha256.New(),
+ }
+}
+
+// Hash computes a merkle-tree root over its input: data is chunked into
+// BlockSize-sized leaf blocks, each hashed with SHA-256, and Sum returns the
+// root of the (zero-padded) tree. Implements hash.Hash.
+type Hash struct {
+ // Hashes of all completed leaf blocks, in write order.
+ blocks [][32]byte
+ // Accumulates the current, not-yet-complete leaf block.
+ nextBlock hash.Hash
+ // Bytes written into nextBlock so far (always < BlockSize between Writes).
+ written int
+}
+
+// remaining reports how many bytes of the current leaf block are still
+// unfilled before it must be finalized. (BlockSize is a package-level
+// constant declared elsewhere in this package.)
+func (h *Hash) remaining() int {
+ return BlockSize - h.written
+}
+
+// Write feeds p into the hash, splitting it across BlockSize-sized leaf
+// blocks. Whenever a block fills, its SHA-256 sum is appended to h.blocks
+// and the block hasher is reset for the next block. Satisfies io.Writer as
+// part of hash.Hash.
+func (h *Hash) Write(p []byte) (n int, err error) {
+ for len(p) > 0 {
+ var n1 int
+ // Write at most the remainder of the current leaf block.
+ n1, err = h.nextBlock.Write(p[:min(len(p), h.remaining())])
+ n += n1
+ h.written += n1
+ p = p[n1:]
+ if h.remaining() == 0 {
+ // Current leaf block is complete: record its hash and start a new one.
+ // Done before the error check so a filled block is flushed even if the
+ // underlying writer reported an error on this pass.
+ h.blocks = append(h.blocks, h.nextBlockSum())
+ h.nextBlock.Reset()
+ h.written = 0
+ }
+ if err != nil {
+ break
+ }
+ }
+ return
+}
+
+// nextBlockSum returns the SHA-256 sum of the in-progress block without
+// disturbing its running state (hash.Hash.Sum appends and does not reset).
+func (h *Hash) nextBlockSum() (sum [32]byte) {
+ h.nextBlock.Sum(sum[:0])
+ return
+}
+
+// Sum appends the merkle root to b and returns the result. A partially
+// written final block is hashed as-is, and the leaf layer is padded with
+// zero hashes up to the next power of two to form a balanced tree. The
+// running state is not visibly altered, per the hash.Hash contract.
+func (h *Hash) Sum(b []byte) []byte {
+ blocks := h.blocks
+ if h.written != 0 {
+ // Include the hash of the partial final block.
+ blocks = append(blocks, h.nextBlockSum())
+ }
+ // NOTE(review): with zero input this relies on RoundUpToPowerOfTwo(0)
+ // and Root of the resulting slice behaving sensibly — confirm against
+ // those helpers (declared elsewhere in this package).
+ n := int(RoundUpToPowerOfTwo(uint(len(blocks))))
+ blocks = append(blocks, make([][32]byte, n-len(blocks))...)
+ sum := Root(blocks)
+ return append(b, sum[:]...)
+}
+
+// Reset returns the Hash to its initial state, discarding all completed
+// blocks and any partially written block, per the hash.Hash contract.
+func (h *Hash) Reset() {
+ h.blocks = h.blocks[:0]
+ h.nextBlock.Reset()
+ // Must clear the partial-block byte count too: otherwise remaining()
+ // reports a short block after Reset, and a subsequent Sum would include
+ // a stale partial-block hash (Write/Sum treat written != 0 as "there is
+ // pending data in nextBlock").
+ h.written = 0
+}
+
+// Size returns the number of bytes Sum appends: 32, the size of the
+// SHA-256 merkle root.
+func (h *Hash) Size() int {
+ return 32
+}
+
+// BlockSize satisfies hash.Hash by reporting the underlying SHA-256 chunk
+// size. Note this is not the merkle leaf BlockSize used by remaining().
+func (h *Hash) BlockSize() int {
+ return h.nextBlock.BlockSize()
+}
+
+var _ hash.Hash = (*Hash)(nil)
"github.com/anacrolix/torrent/merkle"
"github.com/anacrolix/torrent/types/infohash"
infohash_v2 "github.com/anacrolix/torrent/types/infohash-v2"
+ "hash"
"io"
"math/rand"
"net/netip"
return
}
compactLayer, ok := layers[string(f.piecesRoot.Value[:])]
- if !ok {
- continue
- }
var hashes [][32]byte
- hashes, err = merkle.CompactLayerToSliceHashes(compactLayer)
- if err != nil {
- err = fmt.Errorf("bad piece layers for file %q: %w", f, err)
- return
+ if ok {
+ hashes, err = merkle.CompactLayerToSliceHashes(compactLayer)
+ if err != nil {
+ err = fmt.Errorf("bad piece layers for file %q: %w", f, err)
+ return
+ }
+ } else if f.length > t.info.PieceLength {
+ // BEP 52 is pretty strongly worded about this, even though we should be able to
+ // recover: If a v2 torrent is added by magnet link or infohash, we need to fetch piece
+ // layers ourselves anyway, and that's how we can recover from this.
+ t.logger.Levelf(log.Warning, "no piece layers for file %q", f)
+ continue
+ } else {
+ hashes = [][32]byte{f.piecesRoot.Value}
}
if len(hashes) != f.numPieces() {
err = fmt.Errorf("file %q: got %v hashes expected %v", f, len(hashes), f.numPieces())
return
}
for i := range f.numPieces() {
- p := t.piece(f.BeginPieceIndex() + i)
- p.hashV2.Set(hashes[i])
+ pi := f.BeginPieceIndex() + i
+ p := t.piece(pi)
+ // See Torrent.onSetInfo. We want to trigger an initial check if appropriate, if we
+ // didn't yet have a piece hash (can occur with v2 when we don't start with piece
+ // layers).
+ if !p.hashV2.Set(hashes[i]).Ok && p.hash == nil {
+ t.queueInitialPieceCheck(pi)
+ }
}
}
return nil
p.relativeAvailability = t.selectivePieceAvailabilityFromPeers(i)
t.addRequestOrderPiece(i)
t.updatePieceCompletion(i)
- if !t.initialPieceCheckDisabled && !p.storageCompletionOk {
- // t.logger.Printf("piece %s completion unknown, queueing check", p)
- t.queuePieceCheck(i)
- }
+ t.queueInitialPieceCheck(i)
}
t.cl.event.Broadcast()
close(t.gotMetainfoC)
}
func (t *Torrent) hashPiece(piece pieceIndex) (
- ret metainfo.Hash,
+ correct bool,
// These are peers that sent us blocks that differ from what we hash here.
differingPeers map[bannableAddr]struct{},
err error,
) {
p := t.piece(piece)
p.waitNoPendingWrites()
- storagePiece := t.pieces[piece].Storage()
-
- // Does the backend want to do its own hashing?
- if i, ok := storagePiece.PieceImpl.(storage.SelfHashing); ok {
- var sum metainfo.Hash
- // log.Printf("A piece decided to self-hash: %d", piece)
- sum, err = i.SelfHash()
- missinggo.CopyExact(&ret, sum)
- return
+ storagePiece := p.Storage()
+
+ var h hash.Hash
+ if p.hash != nil {
+ h = pieceHash.New()
+
+ // Does the backend want to do its own hashing?
+ if i, ok := storagePiece.PieceImpl.(storage.SelfHashing); ok {
+ var sum metainfo.Hash
+ // log.Printf("A piece decided to self-hash: %d", piece)
+ sum, err = i.SelfHash()
+ correct = sum == *p.hash
+ // Can't do smart banning without reading the piece. The smartBanCache is still cleared
+ // in pieceHasher regardless.
+ return
+ }
+
+ } else if p.hashV2.Ok {
+ h = merkle.NewHash()
+ } else {
+ panic("no hash")
}
- hash := pieceHash.New()
const logPieceContents = false
smartBanWriter := t.smartBanBlockCheckingWriter(piece)
- writers := []io.Writer{hash, smartBanWriter}
+ writers := []io.Writer{h, smartBanWriter}
var examineBuf bytes.Buffer
if logPieceContents {
writers = append(writers, &examineBuf)
}
smartBanWriter.Flush()
differingPeers = smartBanWriter.badPeers
- missinggo.CopyExact(&ret, hash.Sum(nil))
+ if p.hash != nil {
+ var sum [20]byte
+ n := len(h.Sum(sum[:0]))
+ if n != 20 {
+ panic(n)
+ }
+ correct = sum == *p.hash
+ } else if p.hashV2.Ok {
+ var sum [32]byte
+ n := len(h.Sum(sum[:0]))
+ if n != 32 {
+ panic(n)
+ }
+ correct = sum == p.hashV2.Value
+ } else {
+ panic("no hash")
+ }
return
}
} else {
log.Fmsg(
"piece %d failed hash: %d connections contributed", piece, len(p.dirtiers),
- ).AddValues(t, p).LogLevel(
-
- log.Debug, t.logger)
-
+ ).AddValues(t, p).LogLevel(log.Info, t.logger)
pieceHashedNotCorrect.Add(1)
}
}
func (t *Torrent) pieceHasher(index pieceIndex) {
p := t.piece(index)
- sum, failedPeers, copyErr := t.hashPiece(index)
- correct := sum == *p.hash
+ correct, failedPeers, copyErr := t.hashPiece(index)
switch copyErr {
case nil, io.EOF:
default:
return
}
+// queueInitialPieceCheck queues a hash check for piece i, unless initial
+// checks are disabled on the Torrent or storage already reports a known
+// completion state for the piece.
+func (t *Torrent) queueInitialPieceCheck(i pieceIndex) {
+ if !t.initialPieceCheckDisabled && !t.piece(i).storageCompletionOk {
+ t.queuePieceCheck(i)
+ }
+}
+
func (t *Torrent) queuePieceCheck(pieceIndex pieceIndex) {
piece := t.piece(pieceIndex)
if piece.hash == nil && !piece.hashV2.Ok {