"crypto/sha1"
"errors"
"fmt"
- "github.com/nsf/libtorgo/bencode"
"io"
"log"
mathRand "math/rand"
"syscall"
"time"
- metainfo "github.com/nsf/libtorgo/torrent"
+ "github.com/anacrolix/libtorgo/metainfo"
+ "github.com/nsf/libtorgo/bencode"
pp "bitbucket.org/anacrolix/go.torrent/peer_protocol"
"bitbucket.org/anacrolix/go.torrent/tracker"
if t == nil {
return errors.New("no such active torrent")
}
- if t.Info == nil {
+ if !t.haveInfo() {
return errors.New("missing metadata")
}
newPriorities := make([]request, 0, (len_+chunkSize-1)/chunkSize)
err = errors.New("unknown torrent")
return
}
- index := pp.Integer(off / t.Info.PieceLength())
+ index := pp.Integer(off / int64(t.UsualPieceSize()))
// Reading outside the bounds of a file is an error.
if index < 0 {
err = os.ErrInvalid
Type: pp.Extended,
ExtendedID: pp.HandshakeExtendedID,
ExtendedPayload: func() []byte {
- b, err := bencode.Marshal(map[string]interface{}{
+ d := map[string]interface{}{
"m": map[string]int{
"ut_metadata": 1,
},
- })
+ }
+ if torrent.metadataSizeKnown() {
+ d["metadata_size"] = torrent.metadataSize()
+ }
+ b, err := bencode.Marshal(d)
if err != nil {
panic(err)
}
return
}
var pending []int
- for index, have := range t.MetaDataHave {
- if !have {
+ for index := 0; index < t.MetadataPieceCount(); index++ {
+ if !t.HaveMetadataPiece(index) {
pending = append(pending, index)
}
}
}
}
+// completedMetadata is called once every ut_metadata piece has been
+// received. It verifies the assembled t.MetaData against the torrent's
+// info-hash, unmarshals it into a metainfo.Info, and installs it via
+// setMetaData. On any failure the partial metadata is discarded with
+// InvalidateMetadata so the exchange can start over with peers.
+func (cl *Client) completedMetadata(t *torrent) {
+ h := sha1.New()
+ h.Write(t.MetaData)
+ var ih InfoHash
+ copy(ih[:], h.Sum(nil)[:])
+ // The SHA-1 of the info dictionary must equal the torrent's info-hash;
+ // otherwise a peer fed us bogus metadata.
+ if ih != t.InfoHash {
+ log.Print("bad metadata")
+ t.InvalidateMetadata()
+ return
+ }
+ var info metainfo.Info
+ err := bencode.Unmarshal(t.MetaData, &info)
+ if err != nil {
+ log.Printf("error unmarshalling metadata: %s", err)
+ t.InvalidateMetadata()
+ return
+ }
+ // NOTE(review): setMetaData's error return is discarded here — TODO
+ // confirm whether a failure should also invalidate the metadata.
+ cl.setMetaData(t, info, t.MetaData)
+}
+
+// gotMetadataExtensionMsg handles a ut_metadata (BEP 9) extension message
+// received on connection c for torrent t. payload is the raw extended
+// payload: a bencoded dictionary, optionally followed by a metadata piece.
+// Returns an error for undecodable payloads or unknown msg_type values.
+func (cl *Client) gotMetadataExtensionMsg(payload []byte, t *torrent, c *connection) (err error) {
+ var d map[string]int
+ err = bencode.Unmarshal(payload, &d)
+ if err != nil {
+ err = fmt.Errorf("error unmarshalling payload: %s", err)
+ return
+ }
+ msgType, ok := d["msg_type"]
+ if !ok {
+ err = errors.New("missing msg_type field")
+ return
+ }
+ // NOTE(review): a missing "piece" key silently defaults to 0 — confirm
+ // that is acceptable for every message type.
+ piece := d["piece"]
+ log.Println(piece, d["total_size"], len(payload))
+ switch msgType {
+ case pp.DataMetadataExtensionMsgType:
+ if t.haveInfo() {
+ break
+ }
+ // The metadata piece is the suffix of the payload after the bencoded
+ // header; its length is derived from total_size and the piece index.
+ // NOTE(review): peer-supplied total_size/piece are not validated, so a
+ // malicious value could make this slice expression panic — TODO confirm.
+ t.SaveMetadataPiece(piece, payload[len(payload)-metadataPieceSize(d["total_size"], piece):])
+ if !t.HaveAllMetadataPieces() {
+ break
+ }
+ cl.completedMetadata(t)
+ case pp.RequestMetadataExtensionMsgType:
+ if !t.HaveMetadataPiece(piece) {
+ c.Post(t.NewMetadataExtensionMessage(c, pp.RejectMetadataExtensionMsgType, d["piece"], nil))
+ break
+ }
+ // Serve the requested 16 KiB slice from the raw metadata bytes.
+ c.Post(t.NewMetadataExtensionMessage(c, pp.DataMetadataExtensionMsgType, piece, t.MetaData[(1<<14)*piece:(1<<14)*piece+t.metadataPieceSize(piece)]))
+ case pp.RejectMetadataExtensionMsgType:
+ default:
+ err = errors.New("unknown msg_type value")
+ }
+ return
+}
+
func (me *Client) connectionLoop(t *torrent, c *connection) error {
decoder := pp.Decoder{
R: bufio.NewReader(c.Socket),
log.Printf("bad metadata_size type: %T", metadata_sizeUntyped)
} else {
log.Printf("metadata_size: %d", metadata_size)
- t.SetMetaDataSize(metadata_size)
+ t.SetMetadataSize(metadata_size)
}
}
+ log.Println(metadata_sizeUntyped, c.PeerExtensionIDs)
if _, ok := c.PeerExtensionIDs["ut_metadata"]; ok {
me.requestPendingMetadata(t, c)
}
case 1:
- var d map[string]int
- err := bencode.Unmarshal(msg.ExtendedPayload, &d)
- if err != nil {
- err = fmt.Errorf("error unmarshalling extended payload: %s", err)
- break
- }
- if d["msg_type"] != 1 {
- break
- }
- piece := d["piece"]
- log.Println(piece, d["total_size"], len(msg.ExtendedPayload))
- copy(t.MetaData[(1<<14)*piece:], msg.ExtendedPayload[len(msg.ExtendedPayload)-metadataPieceSize(d["total_size"], piece):])
- t.MetaDataHave[piece] = true
- if !t.GotAllMetadataPieces() {
- break
- }
- log.Printf("%q", t.MetaData)
- h := sha1.New()
- h.Write(t.MetaData)
- var ih InfoHash
- copy(ih[:], h.Sum(nil)[:])
- if ih != t.InfoHash {
- panic(ih)
- }
+ err = me.gotMetadataExtensionMsg(msg.ExtendedPayload, t, c)
+ default:
+ err = fmt.Errorf("unexpected extended message ID: %s", msg.ExtendedID)
}
default:
err = fmt.Errorf("received unknown message type: %#v", msg.Type)
return nil
}
-func (cl *Client) setMetaData(t *torrent, md MetaData) (err error) {
- t.Info = md
- t.Data, err = mmapTorrentData(md, cl.DataDir)
+func (cl *Client) setMetaData(t *torrent, md metainfo.Info, bytes []byte) (err error) {
+ err = t.setMetadata(md, cl.DataDir, bytes)
if err != nil {
return
}
- for _, hash := range md.PieceHashes() {
- piece := &piece{}
- copyHashSum(piece.Hash[:], []byte(hash))
- t.Pieces = append(t.Pieces, piece)
- t.pendAllChunkSpecs(pp.Integer(len(t.Pieces) - 1))
- }
- t.Priorities = list.New()
-
// Queue all pieces for hashing. This is done sequentially to avoid
// spamming goroutines.
for _, p := range t.Pieces {
if err != nil {
return
}
- err = me.setMetaData(t, metaInfoMetaData{metaInfo})
+ err = me.setMetaData(t, metaInfo.Info, metaInfo.InfoBytes)
if err != nil {
return
}
}
ppbs := t.piecesByPendingBytes()
// Then finish off incomplete pieces in order of bytes remaining.
- for _, heatThreshold := range []int{0, 4, 100} {
+ for _, heatThreshold := range []int{0, 1, 4, 100} {
for _, pieceIndex := range ppbs {
for _, chunkSpec := range t.Pieces[pieceIndex].shuffledPendingChunkSpecs() {
r := request{pieceIndex, chunkSpec}
func (me *ResponsiveDownloadStrategy) FillRequests(t *torrent, c *connection) {
for e := t.Priorities.Front(); e != nil; e = e.Next() {
+ req := e.Value.(request)
+ if _, ok := t.Pieces[req.Index].PendingChunkSpecs[req.chunkSpec]; !ok {
+ panic(req)
+ }
if !c.Request(e.Value.(request)) {
return
}
func (me *Client) replenishConnRequests(t *torrent, c *connection) {
me.DownloadStrategy.FillRequests(t, c)
me.assertRequestHeat()
- if len(c.Requests) == 0 {
+ if len(c.Requests) == 0 && !c.PeerChoked {
c.SetInterested(false)
}
}
func (me *Client) downloadedChunk(t *torrent, c *connection, msg *pp.Message) error {
req := newRequest(msg.Index, msg.Begin, pp.Integer(len(msg.Piece)))
+ log.Println("got", req)
// Request has been satisfied.
me.connDeleteRequest(t, c, req)
if err != nil {
return err
}
- me.dataReady(dataSpec{t.InfoHash, req})
// Record that we have the chunk.
delete(t.Pieces[req.Index].PendingChunkSpecs, req.chunkSpec)
}
}
+ me.dataReady(dataSpec{t.InfoHash, req})
return nil
}
func TestTorrentInitialState(t *testing.T) {
dir, mi := testutil.GreetingTestTorrent()
defer os.RemoveAll(dir)
- tor, err := newTorrent(mi, dir)
+ tor, err := newTorrent(BytesInfoHash(mi.InfoHash), nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = tor.setMetadata(mi.Info, dir, mi.InfoBytes)
if err != nil {
t.Fatal(err)
}
"os"
"strings"
- metainfo "github.com/nsf/libtorgo/torrent"
+ "github.com/anacrolix/libtorgo/metainfo"
"bitbucket.org/anacrolix/go.torrent"
)
fusefs "bazil.org/fuse/fs"
"bitbucket.org/anacrolix/go.torrent"
"bitbucket.org/anacrolix/go.torrent/fs"
- metainfo "github.com/nsf/libtorgo/torrent"
+ "github.com/anacrolix/libtorgo/metainfo"
)
var (
if !c.PeerHasPiece(chunk.Index) {
return true
}
+ if c.RequestPending(chunk) {
+ return true
+ }
c.SetInterested(true)
if c.PeerChoked {
return false
}
- if c.RequestPending(chunk) {
- return true
- }
if c.Requests == nil {
c.Requests = make(map[request]struct{}, c.PeerMaxRequests)
}
func (conn *connection) writer() {
for b := range conn.write {
_, err := conn.Socket.Write(b)
+ // log.Printf("wrote %q to %s", b, conn.Socket.RemoteAddr())
if err != nil {
if !conn.getClosed() {
log.Print(err)
package torrentfs
import (
- "log"
- "os"
- "sync"
-
"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
"bitbucket.org/anacrolix/go.torrent"
- metainfo "github.com/nsf/libtorgo/torrent"
+ "github.com/anacrolix/libtorgo/metainfo"
+ "log"
+ "os"
+ "sync"
)
const (
type node struct {
path []string
- metaInfo *metainfo.MetaInfo
+ metadata *metainfo.Info
FS *torrentFS
InfoHash torrent.InfoHash
}
if size < 0 {
size = 0
}
- infoHash := torrent.BytesInfoHash(fn.metaInfo.InfoHash)
+ infoHash := fn.InfoHash
torrentOff := fn.TorrentOffset + req.Offset
- // log.Print(torrentOff, size, fn.TorrentOffset)
+ log.Print(torrentOff, size, fn.TorrentOffset)
if err := fn.FS.Client.PrioritizeDataRegion(infoHash, torrentOff, int64(size)); err != nil {
panic(err)
}
func (dn dirNode) ReadDir(intr fusefs.Intr) (des []fuse.Dirent, err fuse.Error) {
names := map[string]bool{}
- for _, fi := range dn.metaInfo.Files {
+ for _, fi := range dn.metadata.Files {
if !isSubPath(dn.path, fi.Path) {
continue
}
func (dn dirNode) Lookup(name string, intr fusefs.Intr) (_node fusefs.Node, err fuse.Error) {
var torrentOffset int64
- for _, fi := range dn.metaInfo.Files {
+ for _, fi := range dn.metadata.Files {
if !isSubPath(dn.path, fi.Path) {
torrentOffset += fi.Length
continue
return
}
-func isSingleFileTorrent(mi *metainfo.MetaInfo) bool {
- return len(mi.Files) == 1 && mi.Files[0].Path == nil
+// isSingleFileTorrent reports whether md describes a single-file torrent.
+// In the metainfo.Info form, single-file torrents have an empty Files list
+// (the length lives in md.Length instead).
+func isSingleFileTorrent(md *metainfo.Info) bool {
+ return len(md.Files) == 0
}
func (me rootNode) Lookup(name string, intr fusefs.Intr) (_node fusefs.Node, err fuse.Error) {
- for _, _torrent := range me.fs.Client.Torrents() {
- metaInfo := _torrent.MetaInfo
- if metaInfo.Name == name {
- __node := node{
- metaInfo: metaInfo,
- FS: me.fs,
- InfoHash: torrent.BytesInfoHash(metaInfo.InfoHash),
- }
- if isSingleFileTorrent(metaInfo) {
- _node = fileNode{__node, uint64(metaInfo.Files[0].Length), 0}
- } else {
- _node = dirNode{__node}
- }
- break
+ for _, t := range me.fs.Client.Torrents() {
+ if t.Name() != name {
+ continue
+ }
+ __node := node{
+ metadata: t.Info,
+ FS: me.fs,
+ InfoHash: t.InfoHash,
}
+ if isSingleFileTorrent(t.Info) {
+ _node = fileNode{__node, uint64(t.Info.Length), 0}
+ } else {
+ _node = dirNode{__node}
+ }
+ break
}
if _node == nil {
err = fuse.ENOENT
func (me rootNode) ReadDir(intr fusefs.Intr) (dirents []fuse.Dirent, err fuse.Error) {
for _, _torrent := range me.fs.Client.Torrents() {
- metaInfo := _torrent.MetaInfo
+ metaInfo := _torrent.Info
dirents = append(dirents, fuse.Dirent{
Name: metaInfo.Name,
Type: func() fuse.DirentType {
import (
"bytes"
+ "fmt"
"io/ioutil"
"log"
"net"
"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
"bitbucket.org/anacrolix/go.torrent"
- metainfo "github.com/nsf/libtorgo/torrent"
+ "github.com/anacrolix/libtorgo/metainfo"
)
func TestTCPAddrString(t *testing.T) {
DisableTrackers: true,
}
client.Start()
+ log.Printf("%+v", *layout.Metainfo)
client.AddTorrent(layout.Metainfo)
fs := New(&client)
fuseConn, err := fuse.Mount(layout.MountDir)
}
return conn
}(),
+ DisableTrackers: true,
}
defer seeder.Listener.Close()
seeder.Start()
defer seeder.Stop()
- seeder.AddTorrent(layout.Metainfo)
+ err = seeder.AddMagnet(fmt.Sprintf("magnet:?xt=urn:btih:%x", layout.Metainfo.InfoHash))
+ if err != nil {
+ t.Fatal(err)
+ }
leecher := torrent.Client{
DataDir: filepath.Join(layout.BaseDir, "download"),
DownloadStrategy: &torrent.ResponsiveDownloadStrategy{},
+ DisableTrackers: true,
}
leecher.Start()
defer leecher.Stop()
}
go func() {
time.Sleep(10 * time.Second)
- fuse.Unmount(mountDir)
+ if err := fuse.Unmount(mountDir); err != nil {
+ t.Log(err)
+ }
}()
content, err := ioutil.ReadFile(filepath.Join(mountDir, "greeting"))
if err != nil {
import (
"bitbucket.org/anacrolix/go.torrent/mmap_span"
+ "bitbucket.org/anacrolix/go.torrent/peer_protocol"
"crypto"
"errors"
- metainfo "github.com/nsf/libtorgo/torrent"
+ "github.com/anacrolix/libtorgo/metainfo"
+ "launchpad.net/gommap"
"math/rand"
"os"
"path/filepath"
"time"
-
- "bitbucket.org/anacrolix/go.torrent/peer_protocol"
- "launchpad.net/gommap"
)
const (
ErrDataNotReady = errors.New("data not ready")
)
-type metaInfoMetaData struct {
- mi *metainfo.MetaInfo
-}
-
-func (me metaInfoMetaData) Files() []metainfo.FileInfo { return me.mi.Files }
-func (me metaInfoMetaData) Name() string { return me.mi.Name }
-func (me metaInfoMetaData) PieceHashes() (ret []string) {
- for i := 0; i < len(me.mi.Pieces); i += 20 {
- ret = append(ret, string(me.mi.Pieces[i:i+20]))
+// upvertedSingleFileInfoFiles normalizes an Info to the multi-file form:
+// if Files is populated it is returned as-is; otherwise a single synthetic
+// FileInfo covering the whole single-file torrent (info.Length, nil Path)
+// is produced.
+func upvertedSingleFileInfoFiles(info *metainfo.Info) []metainfo.FileInfo {
+ if len(info.Files) != 0 {
+ return info.Files
 }
- return
-}
-func (me metaInfoMetaData) PieceLength() int64 { return me.mi.PieceLength }
-func (me metaInfoMetaData) PieceCount() int {
- return len(me.mi.Pieces) / pieceHash.Size()
-}
-
-func NewMetaDataFromMetaInfo(mi *metainfo.MetaInfo) MetaData {
- return metaInfoMetaData{mi}
-}
-
-type MetaData interface {
- PieceHashes() []string
- Files() []metainfo.FileInfo
- Name() string
- PieceLength() int64
- PieceCount() int
+ return []metainfo.FileInfo{{Length: info.Length, Path: nil}}
}
-func mmapTorrentData(md MetaData, location string) (mms mmap_span.MMapSpan, err error) {
+func mmapTorrentData(md *metainfo.Info, location string) (mms mmap_span.MMapSpan, err error) {
defer func() {
if err != nil {
mms.Close()
mms = nil
}
}()
- for _, miFile := range md.Files() {
- fileName := filepath.Join(append([]string{location, md.Name()}, miFile.Path...)...)
+ for _, miFile := range upvertedSingleFileInfoFiles(md) {
+ fileName := filepath.Join(append([]string{location, md.Name}, miFile.Path...)...)
err = os.MkdirAll(filepath.Dir(fileName), 0777)
if err != nil {
return
Extended = 20
HandshakeExtendedID = 0
+
+ RequestMetadataExtensionMsgType = 0
+ DataMetadataExtensionMsgType = 1
+ RejectMetadataExtensionMsgType = 2
)
type Message struct {
package testutil
import (
+ "bytes"
"io"
"io/ioutil"
"os"
"path/filepath"
- metainfo "github.com/nsf/libtorgo/torrent"
-
- "bytes"
+ "github.com/anacrolix/libtorgo/metainfo"
)
const GreetingFileContents = "hello, world\n"
package torrent
import (
+ "bitbucket.org/anacrolix/go.torrent/mmap_span"
+ pp "bitbucket.org/anacrolix/go.torrent/peer_protocol"
+ "bitbucket.org/anacrolix/go.torrent/tracker"
"container/list"
"fmt"
+ "github.com/anacrolix/libtorgo/bencode"
+ "github.com/anacrolix/libtorgo/metainfo"
"io"
"log"
"net"
"sort"
-
- "bitbucket.org/anacrolix/go.torrent/mmap_span"
- pp "bitbucket.org/anacrolix/go.torrent/peer_protocol"
- "bitbucket.org/anacrolix/go.torrent/tracker"
)
func (t *torrent) PieceNumPendingBytes(index pp.Integer) (count pp.Integer) {
InfoHash InfoHash
Pieces []*piece
Data mmap_span.MMapSpan
- Info MetaData
+ Info *metainfo.Info
Conns []*connection
Peers []Peer
Priorities *list.List
lastReadPiece int
DisplayName string
MetaData []byte
- MetaDataHave []bool
+ metadataHave []bool
+}
+
+// InvalidateMetadata discards metadata progress so the ut_metadata exchange
+// can restart, e.g. after an info-hash mismatch. Only the have-flags and
+// Info pointer are cleared; the MetaData buffer itself is kept for reuse.
+func (t *torrent) InvalidateMetadata() {
+ for i := range t.metadataHave {
+ t.metadataHave[i] = false
+ }
+ t.Info = nil
+}
+
+// SaveMetadataPiece stores one received metadata piece at its 16 KiB offset
+// in t.MetaData and marks it held. No-op once the full info is known.
+// NOTE(review): index is not bounds-checked against MetaData/metadataHave —
+// confirm callers validate peer-supplied indices before calling.
+func (t *torrent) SaveMetadataPiece(index int, data []byte) {
+ if t.haveInfo() {
+ return
+ }
+ copy(t.MetaData[(1<<14)*index:], data)
+ t.metadataHave[index] = true
+}
+
+// MetadataPieceCount returns how many 16 KiB pieces the metadata is split
+// into (ceiling division; 0 while the size is unknown).
+func (t *torrent) MetadataPieceCount() int {
+ return (len(t.MetaData) + (1 << 14) - 1) / (1 << 14)
+}
+
+// HaveMetadataPiece reports whether the given metadata piece is held.
+// Trivially true once the full info dictionary is known.
+// NOTE(review): while the metadata size is unknown, metadataHave is nil and
+// any index will panic — confirm callers guard against that.
+func (t *torrent) HaveMetadataPiece(piece int) bool {
+ return t.haveInfo() || t.metadataHave[piece]
+}
-func (t *torrent) GotAllMetadataPieces() bool {
- if t.MetaDataHave == nil {
+// metadataSizeKnown reports whether the metadata size has been learned
+// (the buffer is allocated by SetMetadataSize or setMetadata).
+func (t *torrent) metadataSizeKnown() bool {
+ return t.MetaData != nil
+}
+
+// metadataSize returns the total metadata size in bytes (0 if unknown).
+func (t *torrent) metadataSize() int {
+ return len(t.MetaData)
+}
+
+// infoPieceHashes splits the concatenated SHA-1 digests in info.Pieces into
+// individual 20-byte hash strings, one per torrent piece.
+func infoPieceHashes(info *metainfo.Info) (ret []string) {
+ for i := 0; i < len(info.Pieces); i += 20 {
+ ret = append(ret, string(info.Pieces[i:i+20]))
+ }
+ return
+}
+
+// setMetadata installs the torrent's info dictionary. md is the decoded
+// info, infoBytes its raw bencoding (retained so metadata can be served to
+// peers), and dataDir is where the data files are mmapped. metadataHave is
+// cleared since the complete metadata is now present.
+// NOTE(review): if mmapTorrentData fails, Info/MetaData are already set —
+// confirm callers treat the torrent as unusable in that case.
+func (t *torrent) setMetadata(md metainfo.Info, dataDir string, infoBytes []byte) (err error) {
+ t.Info = &md
+ t.MetaData = infoBytes
+ t.metadataHave = nil
+ t.Data, err = mmapTorrentData(&md, dataDir)
+ if err != nil {
+ return
+ }
+ // One piece record per 20-byte SHA-1 hash; all chunks start out pending.
+ for _, hash := range infoPieceHashes(&md) {
+ piece := &piece{}
+ copyHashSum(piece.Hash[:], []byte(hash))
+ t.Pieces = append(t.Pieces, piece)
+ t.pendAllChunkSpecs(pp.Integer(len(t.Pieces) - 1))
+ }
+ t.Priorities = list.New()
+ return
+}
+
+// HaveAllMetadataPieces reports whether every metadata piece has been
+// received: trivially true once the info is set, false while the size is
+// still unknown (metadataHave nil).
+func (t *torrent) HaveAllMetadataPieces() bool {
+ if t.haveInfo() {
+ return true
+ }
+ if t.metadataHave == nil {
return false
}
- for _, have := range t.MetaDataHave {
+ for _, have := range t.metadataHave {
if !have {
return false
}
return true
}
-func (t *torrent) SetMetaDataSize(bytes int64) {
+// SetMetadataSize allocates the metadata receive buffer and the per-piece
+// have-flags once a peer advertises metadata_size. Subsequent calls are
+// ignored once the buffer exists, regardless of the size they carry.
+func (t *torrent) SetMetadataSize(bytes int64) {
if t.MetaData != nil {
- if len(t.MetaData) != int(bytes) {
- log.Printf("new metadata_size differs")
- }
return
}
t.MetaData = make([]byte, bytes)
- t.MetaDataHave = make([]bool, (bytes+(1<<14)-1)/(1<<14))
+ t.metadataHave = make([]bool, (bytes+(1<<14)-1)/(1<<14))
}
func (t *torrent) Name() string {
+ // Fall back to the magnet display name until the info dictionary arrives.
- if t.Info == nil {
+ if !t.haveInfo() {
return t.DisplayName
}
- return t.Info.Name()
+ return t.Info.Name
}
func (t *torrent) pieceStatusChar(index int) byte {
}
}
+// metadataPieceSize returns the byte length of the given metadata piece
+// (delegates to the package-level helper over the full metadata length).
+func (t *torrent) metadataPieceSize(piece int) int {
+ return metadataPieceSize(len(t.MetaData), piece)
+}
+
+// NewMetadataExtensionMessage builds a ut_metadata extension message for
+// connection c: a bencoded header (msg_type, piece, plus total_size when a
+// data payload is attached) followed by the raw piece data. The extended
+// message ID used is the one the peer advertised for "ut_metadata".
+// Panics only if bencoding the header fails, which indicates a bug.
+func (t *torrent) NewMetadataExtensionMessage(c *connection, msgType int, piece int, data []byte) pp.Message {
+ d := map[string]int{
+ "msg_type": msgType,
+ "piece": piece,
+ }
+ // total_size accompanies data messages only.
+ if data != nil {
+ d["total_size"] = len(t.MetaData)
+ }
+ p, err := bencode.Marshal(d)
+ if err != nil {
+ panic(err)
+ }
+ return pp.Message{
+ Type: pp.Extended,
+ ExtendedID: byte(c.PeerExtensionIDs["ut_metadata"]),
+ ExtendedPayload: append(p, data...),
+ }
+
+}
+
func (t *torrent) WriteStatus(w io.Writer) {
fmt.Fprint(w, "Pieces: ")
for index := range t.Pieces {
}
func (t *torrent) UsualPieceSize() int {
- return int(t.Info.PieceLength())
+ return int(t.Info.PieceLength)
}
func (t *torrent) LastPieceSize() int {
}
func (t *torrent) NumPieces() int {
+ // Pieces is a concatenation of 20-byte SHA-1 digests, one per piece.
- return t.Info.PieceCount()
+ return len(t.Info.Pieces) / 20
}
func (t *torrent) NumPiecesCompleted() (num int) {
}
func (t *torrent) requestOffset(r request) int64 {
- return torrentRequestOffset(t.Length(), t.Info.PieceLength(), r)
+ return torrentRequestOffset(t.Length(), int64(t.UsualPieceSize()), r)
}
// Return the request that would include the given offset into the torrent data.
func (t *torrent) offsetRequest(off int64) (req request, ok bool) {
- return torrentOffsetRequest(t.Length(), t.Info.PieceLength(), chunkSize, off)
+ return torrentOffsetRequest(t.Length(), t.Info.PieceLength, chunkSize, off)
}
func (t *torrent) WriteChunk(piece int, begin int64, data []byte) (err error) {
+ // Absolute torrent offset = piece index × piece length + begin.
- _, err = t.Data.WriteAt(data, int64(piece)*t.Info.PieceLength()+begin)
+ _, err = t.Data.WriteAt(data, int64(piece)*t.Info.PieceLength+begin)
return
}
if piece.PendingChunkSpecs == nil {
piece.PendingChunkSpecs = make(
map[chunkSpec]struct{},
- (t.Info.PieceLength()+chunkSize-1)/chunkSize)
+ (t.Info.PieceLength+chunkSize-1)/chunkSize)
}
c := chunkSpec{
Begin: 0,
func (t *torrent) PieceLength(piece pp.Integer) (len_ pp.Integer) {
+ // The final piece is the remainder of the data length; all other pieces
+ // (and a final piece that divides evenly, leaving len_ == 0) use the
+ // nominal piece length.
if int(piece) == t.NumPieces()-1 {
- len_ = pp.Integer(t.Data.Size() % t.Info.PieceLength())
+ len_ = pp.Integer(t.Data.Size() % t.Info.PieceLength)
}
if len_ == 0 {
- len_ = pp.Integer(t.Info.PieceLength())
+ len_ = pp.Integer(t.Info.PieceLength)
}
return
}
func (t *torrent) HashPiece(piece pp.Integer) (ps pieceSum) {
hash := pieceHash.New()
- n, err := t.Data.WriteSectionTo(hash, int64(piece)*t.Info.PieceLength(), t.Info.PieceLength())
+ n, err := t.Data.WriteSectionTo(hash, int64(piece)*t.Info.PieceLength, t.Info.PieceLength)
if err != nil {
panic(err)
}
if pp.Integer(n) != t.PieceLength(piece) {
+ log.Print(t.Info)
panic(fmt.Sprintf("hashed wrong number of bytes: expected %d; did %d; piece %d", t.PieceLength(piece), n, piece))
}
copyHashSum(ps[:], hash.Sum(nil))