Sergey Matveev's repositories - btrtrc.git/commitdiff
Merge pull request #9 from gitter-badger/gitter-badge
authorMatt Joiner <anacrolix@gmail.com>
Thu, 2 Jul 2015 10:29:54 +0000 (20:29 +1000)
committerMatt Joiner <anacrolix@gmail.com>
Thu, 2 Jul 2015 10:29:54 +0000 (20:29 +1000)
Add a Gitter chat badge to README.md

75 files changed:
README.md
TODO
bencode/README [new file with mode: 0644]
bencode/_testdata/archlinux-2011.08.19-netinstall-i686.iso.torrent [new file with mode: 0644]
bencode/_testdata/continuum.torrent [new file with mode: 0644]
bencode/api.go [new file with mode: 0644]
bencode/both_test.go [new file with mode: 0644]
bencode/decode.go [new file with mode: 0644]
bencode/decode_test.go [new file with mode: 0644]
bencode/encode.go [new file with mode: 0644]
bencode/encode_test.go [new file with mode: 0644]
bencode/tags.go [new file with mode: 0644]
client.go
client_test.go
cmd/dht-get-peers/main.go
cmd/dht-ping/main.go
cmd/dht-server/main.go
cmd/magnet-metainfo/main.go
cmd/torrent-create/main.go
cmd/torrent-infohash/main.go
cmd/torrent-magnet/main.go
cmd/torrent-metainfo-pprint/main.go
cmd/torrent-pick/main.go [new file with mode: 0644]
cmd/torrent-verify/main.go
cmd/torrent/main.go
cmd/torrentfs/main.go
cmd/tracker-announce/main.go
config.go
connection.go
connection_test.go
data/blob/atime_darwin.go [deleted file]
data/blob/atime_linux.go [deleted file]
data/blob/blob.go
data/blob/store.go
data/data.go
data/file/file.go
data/mmap/mmap.go
dht/addr.go [new file with mode: 0644]
dht/announce.go [moved from dht/getpeers.go with 63% similarity]
dht/dht.go
dht/dht_test.go
dht/expvar.go [new file with mode: 0644]
doc.go [new file with mode: 0644]
example_test.go [new file with mode: 0644]
file.go [new file with mode: 0644]
fs/TODO [new file with mode: 0644]
fs/torrentfs.go
fs/torrentfs_test.go
internal/pieceordering/pieceordering.go
internal/testutil/testutil.go
iplist/iplist.go
magnet_test.go
metainfo/README [new file with mode: 0644]
metainfo/_testdata/23516C72685E8DB0C8F15553382A927F185C4F01.torrent [new file with mode: 0644]
metainfo/_testdata/archlinux-2011.08.19-netinstall-i686.iso.torrent [new file with mode: 0644]
metainfo/_testdata/continuum.torrent [new file with mode: 0644]
metainfo/_testdata/trackerless.torrent [new file with mode: 0644]
metainfo/builder.go [new file with mode: 0644]
metainfo/metainfo.go [new file with mode: 0644]
metainfo/metainfo_test.go [new file with mode: 0644]
misc.go
misc_test.go [new file with mode: 0644]
mmap_span/span.go
piece.go [new file with mode: 0644]
piecestate.go [new file with mode: 0644]
reader.go [new file with mode: 0644]
t.go [new file with mode: 0644]
testdata/bootstrap.dat.torrent [new file with mode: 0644]
torrent.go
torrent_test.go
tracker/http.go
tracker/udp.go
util/dirwatch/dirwatch.go
util/types.go
worst_conns.go

index 152a682f34c94008e467970f7dc7acb3078622f7..ff91f9a8ec02f34f4d24b61fd1d761574516bf30 100644 (file)
--- a/README.md
+++ b/README.md
@@ -9,6 +9,8 @@ This repository implements BitTorrent-related packages and command-line utilitie
 
 There is support for protocol encryption, DHT, PEX, uTP, and various extensions. There are several storage backends provided, blob, file, mmap. You can use the provided binaries in `./cmd`, or use `torrent` as a library for your own applications.
 
+See also the [mailing list](https://groups.google.com/forum/#!forum/go_torrent), and the [Gophers Slack channel](https://gophers.slack.com/#torrent).
+
 ## Installation
 
 Install the library package with `go get github.com/anacrolix/torrent`, or the provided cmds with `go get github.com/anacrolix/torrent/cmd/...`.
@@ -17,14 +19,53 @@ Install the library package with `go get github.com/anacrolix/torrent`, or the p
 
 There is a small example in the [package documentation](https://godoc.org/github.com/anacrolix/torrent).
 
-## Torrent utility
+## Commands
+
+Here I'll describe what some of the provided commands in `./cmd` do.
+
+Note that [`godo`](https://bitbucket.org/anacrolix/go-utils) that I invoke in the following examples is a command that builds and executes a Go import path, like `go run`. It's easier to use this convention than to spell out the install/invoke cycle for every single example.
+
+### torrent
 
-There's a provided utility that downloads torrents from the command-line.
+Downloads torrents from the command-line.
 
        $ go get github.com/anacrolix/torrent/cmd/torrent
-       $ torrent 'magnet:?xt=urn:btih:ZOCMZQIPFFW7OLLMIC5HUB6BPCSDEOQU'
-    2015/03/20 22:51:41 main.go:96: downloaded ALL the torrents
-    $ md5sum ubuntu-14.04.1-desktop-amd64.iso
-    119cb63b48c9a18f31f417f09655efbd  ubuntu-14.04.1-desktop-amd64.iso
+       $ torrent 'magnet:?xt=urn:btih:KRWPCX3SJUM4IMM4YF5RPHL6ANPYTQPU'
+    2015/04/01 02:08:20 main.go:137: downloaded ALL the torrents
+    $ md5sum ubuntu-14.04.2-desktop-amd64.iso
+    1b305d585b1918f297164add46784116  ubuntu-14.04.2-desktop-amd64.iso
     $ echo such amaze
     wow
+
+### torrentfs
+
+torrentfs mounts a FUSE filesystem at `-mountDir`. The contents are the torrents described by the torrent files and magnet links at `-torrentPath`. Data for read requests is fetched only as required from the torrent network, and stored at `-downloadDir`.
+
+    $ mkdir mnt torrents
+    $ godo github.com/anacrolix/torrent/cmd/torrentfs -mountDir mnt -torrentPath torrents &
+    $ cd torrents
+    $ wget http://releases.ubuntu.com/14.04.2/ubuntu-14.04.2-desktop-amd64.iso.torrent
+    $ cd ..
+    $ ls mnt
+    ubuntu-14.04.2-desktop-amd64.iso
+    $ pv mnt/ubuntu-14.04.2-desktop-amd64.iso | md5sum
+    996MB 0:04:40 [3.55MB/s] [========================================>] 100%
+    1b305d585b1918f297164add46784116  -
+
+### torrent-magnet
+
+Creates a magnet link from a torrent file. Note the extracted trackers, display name, and info hash.
+
+    $ godo github.com/anacrolix/torrent/cmd/torrent-magnet < ubuntu-14.04.2-desktop-amd64.iso.torrent
+       magnet:?xt=urn:btih:546cf15f724d19c4319cc17b179d7e035f89c1f4&dn=ubuntu-14.04.2-desktop-amd64.iso&tr=http%3A%2F%2Ftorrent.ubuntu.com%3A6969%2Fannounce&tr=http%3A%2F%2Fipv6.torrent.ubuntu.com%3A6969%2Fannounce
+
+### dht-ping
+
+Pings DHT nodes with the given network addresses.
+
+    $ godo ./cmd/dht-ping router.bittorrent.com:6881 router.utorrent.com:6881
+    2015/04/01 17:21:23 main.go:33: dht server on [::]:60058
+    32f54e697351ff4aec29cdbaabf2fbe3467cc267 (router.bittorrent.com:6881): 648.218621ms
+    ebff36697351ff4aec29cdbaabf2fbe3467cc267 (router.utorrent.com:6881): 873.864706ms
+    2/2 responses (100.000000%)
+
diff --git a/TODO b/TODO
index 72aabfc623f3fa1e5acefc526e0fa012417735a4..e5516862e48308dde3bb1ac8c23a389b932a9a11 100644 (file)
--- a/TODO
+++ b/TODO
@@ -1,7 +1,11 @@
  * Track upload and download data.
- * Emulate a UDP server in the UDP tracker tests.
- * Make use of sparse file regions in download data for faster hashing.
- * If we're choked and interested, we never send not-interested if there's nothing we want?
- * Don't announce torrents that don't need active peers. It spams UDP, fills memory, and publicizes what we have loaded.
- * Randomize triedAddrs bloom filter to allow different Addr sets on each Announce.
- * When lots of good connections, it'll do a huge readahead, then refuse to trickle new pieces because we sent not interested to them all, thereby reducing the number of unchoked connections.
\ No newline at end of file
+ * Emulate a UDP server in the UDP tracker tests rather than communicating with the Internet.
+ * Make use of sparse file regions in download data for faster hashing. This is available as whence 3 and 4 on some OS?
+ * When we're choked and interested, are we not interested if there's no longer anything that we want?
+ * dht: Randomize triedAddrs bloom filter to allow different Addr sets on each Announce.
+ * dht: Verify that the triedAddrs bloom filter is working well, github's willf made a bunch of changes.
+ * Rearrange the local-peer choked/interested status flags to be more natural to read.
+ * Check that pruning is working correctly. worstConns sorting might need an adjustment to how it factors in the good/unwanted chunks ratio.
+ * data/blob: Deleting incomplete data triggers io.ErrUnexpectedEOF that isn't recovered from.
+ * UL/DL rate-limiting.
+ * Handle Torrent being dropped before GotInfo.
diff --git a/bencode/README b/bencode/README
new file mode 100644 (file)
index 0000000..440bd5f
--- /dev/null
@@ -0,0 +1 @@
+Bencode encoding/decoding sub package. Uses similar API design to Go's json package.
diff --git a/bencode/_testdata/archlinux-2011.08.19-netinstall-i686.iso.torrent b/bencode/_testdata/archlinux-2011.08.19-netinstall-i686.iso.torrent
new file mode 100644 (file)
index 0000000..9ce7748
Binary files /dev/null and b/bencode/_testdata/archlinux-2011.08.19-netinstall-i686.iso.torrent differ
diff --git a/bencode/_testdata/continuum.torrent b/bencode/_testdata/continuum.torrent
new file mode 100644 (file)
index 0000000..ac15b75
Binary files /dev/null and b/bencode/_testdata/continuum.torrent differ
diff --git a/bencode/api.go b/bencode/api.go
new file mode 100644 (file)
index 0000000..b4ddcec
--- /dev/null
@@ -0,0 +1,165 @@
+package bencode
+
+import (
+       "bufio"
+       "bytes"
+       "fmt"
+       "io"
+       "reflect"
+)
+
+//----------------------------------------------------------------------------
+// Errors
+//----------------------------------------------------------------------------
+
+// In case if marshaler cannot encode a type, it will return this error. Typical
+// example of such type is float32/float64 which has no bencode representation.
+type MarshalTypeError struct {
+       Type reflect.Type
+}
+
+func (this *MarshalTypeError) Error() string {
+       return "bencode: unsupported type: " + this.Type.String()
+}
+
+// Unmarshal argument must be a non-nil value of some pointer type.
+type UnmarshalInvalidArgError struct {
+       Type reflect.Type
+}
+
+func (e *UnmarshalInvalidArgError) Error() string {
+       if e.Type == nil {
+               return "bencode: Unmarshal(nil)"
+       }
+
+       if e.Type.Kind() != reflect.Ptr {
+               return "bencode: Unmarshal(non-pointer " + e.Type.String() + ")"
+       }
+       return "bencode: Unmarshal(nil " + e.Type.String() + ")"
+}
+
+// Unmarshaler spotted a value that was not appropriate for a given Go value.
+type UnmarshalTypeError struct {
+       Value string
+       Type  reflect.Type
+}
+
+func (e *UnmarshalTypeError) Error() string {
+       return "bencode: value (" + e.Value + ") is not appropriate for type: " +
+               e.Type.String()
+}
+
+// Unmarshaler tried to write to an unexported (therefore unwritable) field.
+type UnmarshalFieldError struct {
+       Key   string
+       Type  reflect.Type
+       Field reflect.StructField
+}
+
+func (e *UnmarshalFieldError) Error() string {
+       return "bencode: key \"" + e.Key + "\" led to an unexported field \"" +
+               e.Field.Name + "\" in type: " + e.Type.String()
+}
+
+// Malformed bencode input, unmarshaler failed to parse it.
+type SyntaxError struct {
+       Offset int64 // location of the error
+       What   error // error description
+}
+
+func (e *SyntaxError) Error() string {
+       return fmt.Sprintf("bencode: syntax error (offset: %d): %s", e.Offset, e.What)
+}
+
+// A non-nil error was returned after calling MarshalBencode on a type which
+// implements the Marshaler interface.
+type MarshalerError struct {
+       Type reflect.Type
+       Err  error
+}
+
+func (e *MarshalerError) Error() string {
+       return "bencode: error calling MarshalBencode for type " + e.Type.String() + ": " + e.Err.Error()
+}
+
+// A non-nil error was returned after calling UnmarshalBencode on a type which
+// implements the Unmarshaler interface.
+type UnmarshalerError struct {
+       Type reflect.Type
+       Err  error
+}
+
+func (e *UnmarshalerError) Error() string {
+       return "bencode: error calling UnmarshalBencode for type " + e.Type.String() + ": " + e.Err.Error()
+}
+
+//----------------------------------------------------------------------------
+// Interfaces
+//----------------------------------------------------------------------------
+
+// Any type which implements this interface, will be marshaled using the
+// specified method.
+type Marshaler interface {
+       MarshalBencode() ([]byte, error)
+}
+
+// Any type which implements this interface, will be unmarshaled using the
+// specified method.
+type Unmarshaler interface {
+       UnmarshalBencode([]byte) error
+}
+
+//----------------------------------------------------------------------------
+// Stateless interface
+//----------------------------------------------------------------------------
+
+// Marshal the value 'v' to the bencode form, return the result as []byte and an
+// error if any.
+func Marshal(v interface{}) ([]byte, error) {
+       var buf bytes.Buffer
+       e := encoder{Writer: bufio.NewWriter(&buf)}
+       err := e.encode(v)
+       if err != nil {
+               return nil, err
+       }
+       return buf.Bytes(), nil
+}
+
+// Unmarshal the bencode value in the 'data' to a value pointed by the 'v'
+// pointer, return a non-nil error if any.
+func Unmarshal(data []byte, v interface{}) error {
+       e := decoder{Reader: bufio.NewReader(bytes.NewBuffer(data))}
+       return e.decode(v)
+}
+
+//----------------------------------------------------------------------------
+// Stateful interface
+//----------------------------------------------------------------------------
+
+type Decoder struct {
+       d decoder
+}
+
+func NewDecoder(r io.Reader) *Decoder {
+       return &Decoder{decoder{Reader: bufio.NewReader(r)}}
+}
+
+func (d *Decoder) Decode(v interface{}) error {
+       return d.d.decode(v)
+}
+
+type Encoder struct {
+       e encoder
+}
+
+func NewEncoder(w io.Writer) *Encoder {
+       return &Encoder{encoder{Writer: bufio.NewWriter(w)}}
+}
+
+func (e *Encoder) Encode(v interface{}) error {
+       err := e.e.encode(v)
+       if err != nil {
+               return err
+       }
+       return nil
+}
diff --git a/bencode/both_test.go b/bencode/both_test.go
new file mode 100644 (file)
index 0000000..a000f35
--- /dev/null
@@ -0,0 +1,80 @@
+package bencode
+
+import "testing"
+import "bytes"
+import "io/ioutil"
+
+func load_file(name string, t *testing.T) []byte {
+       data, err := ioutil.ReadFile(name)
+       if err != nil {
+               t.Fatal(err)
+       }
+       return data
+}
+
+func test_file_interface(t *testing.T, filename string) {
+       data1 := load_file(filename, t)
+       var iface interface{}
+
+       err := Unmarshal(data1, &iface)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       data2, err := Marshal(iface)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       if !bytes.Equal(data1, data2) {
+               t.Fatalf("equality expected\n")
+       }
+
+}
+
+func TestBothInterface(t *testing.T) {
+       test_file_interface(t, "_testdata/archlinux-2011.08.19-netinstall-i686.iso.torrent")
+       test_file_interface(t, "_testdata/continuum.torrent")
+}
+
+type torrent_file struct {
+       Info struct {
+               Name        string `bencode:"name"`
+               Length      int64  `bencode:"length"`
+               MD5Sum      string `bencode:"md5sum,omitempty"`
+               PieceLength int64  `bencode:"piece length"`
+               Pieces      string `bencode:"pieces"`
+               Private     bool   `bencode:"private,omitempty"`
+       } `bencode:"info"`
+
+       Announce     string      `bencode:"announce"`
+       AnnounceList [][]string  `bencode:"announce-list,omitempty"`
+       CreationDate int64       `bencode:"creation date,omitempty"`
+       Comment      string      `bencode:"comment,omitempty"`
+       CreatedBy    string      `bencode:"created by,omitempty"`
+       URLList      interface{} `bencode:"url-list,omitempty"`
+}
+
+func test_file(t *testing.T, filename string) {
+       data1 := load_file(filename, t)
+       var f torrent_file
+
+       err := Unmarshal(data1, &f)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       data2, err := Marshal(&f)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       if !bytes.Equal(data1, data2) {
+               println(string(data2))
+               t.Fatalf("equality expected")
+       }
+}
+
+func TestBoth(t *testing.T) {
+       test_file(t, "_testdata/archlinux-2011.08.19-netinstall-i686.iso.torrent")
+}
diff --git a/bencode/decode.go b/bencode/decode.go
new file mode 100644 (file)
index 0000000..2ab9c01
--- /dev/null
@@ -0,0 +1,578 @@
+package bencode
+
+import (
+       "bufio"
+       "bytes"
+       "errors"
+       "io"
+       "reflect"
+       "runtime"
+       "strconv"
+       "strings"
+)
+
+type decoder struct {
+       *bufio.Reader
+       offset int64
+       buf    bytes.Buffer
+       key    string
+}
+
+func (d *decoder) decode(v interface{}) (err error) {
+       defer func() {
+               if e := recover(); e != nil {
+                       if _, ok := e.(runtime.Error); ok {
+                               panic(e)
+                       }
+                       err = e.(error)
+               }
+       }()
+
+       pv := reflect.ValueOf(v)
+       if pv.Kind() != reflect.Ptr || pv.IsNil() {
+               return &UnmarshalInvalidArgError{reflect.TypeOf(v)}
+       }
+
+       d.parse_value(pv.Elem())
+       return nil
+}
+
+func check_for_unexpected_eof(err error, offset int64) {
+       if err == io.EOF {
+               panic(&SyntaxError{
+                       Offset: offset,
+                       What:   io.ErrUnexpectedEOF,
+               })
+       }
+}
+
+func (d *decoder) read_byte() byte {
+       b, err := d.ReadByte()
+       if err != nil {
+               check_for_unexpected_eof(err, d.offset)
+               panic(err)
+       }
+
+       d.offset++
+       return b
+}
+
+// reads data writing it to 'd.buf' until 'sep' byte is encountered, 'sep' byte
+// is consumed, but not included into the 'd.buf'
+func (d *decoder) read_until(sep byte) {
+       for {
+               b := d.read_byte()
+               if b == sep {
+                       return
+               }
+               d.buf.WriteByte(b)
+       }
+}
+
+func check_for_int_parse_error(err error, offset int64) {
+       if err != nil {
+               panic(&SyntaxError{
+                       Offset: offset,
+                       What:   err,
+               })
+       }
+}
+
+// called when 'i' was consumed
+func (d *decoder) parse_int(v reflect.Value) {
+       start := d.offset - 1
+       d.read_until('e')
+       if d.buf.Len() == 0 {
+               panic(&SyntaxError{
+                       Offset: start,
+                       What:   errors.New("empty integer value"),
+               })
+       }
+
+       switch v.Kind() {
+       case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+               n, err := strconv.ParseInt(d.buf.String(), 10, 64)
+               check_for_int_parse_error(err, start)
+
+               if v.OverflowInt(n) {
+                       panic(&UnmarshalTypeError{
+                               Value: "integer " + d.buf.String(),
+                               Type:  v.Type(),
+                       })
+               }
+               v.SetInt(n)
+       case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+               n, err := strconv.ParseUint(d.buf.String(), 10, 64)
+               check_for_int_parse_error(err, start)
+
+               if v.OverflowUint(n) {
+                       panic(&UnmarshalTypeError{
+                               Value: "integer " + d.buf.String(),
+                               Type:  v.Type(),
+                       })
+               }
+               v.SetUint(n)
+       case reflect.Bool:
+               v.SetBool(d.buf.String() != "0")
+       default:
+               panic(&UnmarshalTypeError{
+                       Value: "integer " + d.buf.String(),
+                       Type:  v.Type(),
+               })
+       }
+       d.buf.Reset()
+}
+
+func (d *decoder) parse_string(v reflect.Value) {
+       start := d.offset - 1
+
+       // read the string length first
+       d.read_until(':')
+       length, err := strconv.ParseInt(d.buf.String(), 10, 64)
+       check_for_int_parse_error(err, start)
+
+       d.buf.Reset()
+       n, err := io.CopyN(&d.buf, d, length)
+       d.offset += n
+       if err != nil {
+               check_for_unexpected_eof(err, d.offset)
+               panic(&SyntaxError{
+                       Offset: d.offset,
+                       What:   errors.New("unexpected I/O error: " + err.Error()),
+               })
+       }
+
+       switch v.Kind() {
+       case reflect.String:
+               v.SetString(d.buf.String())
+       case reflect.Slice:
+               if v.Type().Elem().Kind() != reflect.Uint8 {
+                       panic(&UnmarshalTypeError{
+                               Value: "string",
+                               Type:  v.Type(),
+                       })
+               }
+               sl := make([]byte, len(d.buf.Bytes()))
+               copy(sl, d.buf.Bytes())
+               v.Set(reflect.ValueOf(sl))
+       default:
+               panic(&UnmarshalTypeError{
+                       Value: "string",
+                       Type:  v.Type(),
+               })
+       }
+
+       d.buf.Reset()
+}
+
+func (d *decoder) parse_dict(v reflect.Value) {
+       switch v.Kind() {
+       case reflect.Map:
+               t := v.Type()
+               if t.Key().Kind() != reflect.String {
+                       panic(&UnmarshalTypeError{
+                               Value: "object",
+                               Type:  t,
+                       })
+               }
+               if v.IsNil() {
+                       v.Set(reflect.MakeMap(t))
+               }
+       case reflect.Struct:
+       default:
+               panic(&UnmarshalTypeError{
+                       Value: "object",
+                       Type:  v.Type(),
+               })
+       }
+
+       var map_elem reflect.Value
+
+       // so, at this point 'd' byte was consumed, let's just read key/value
+       // pairs one by one
+       for {
+               var valuev reflect.Value
+               keyv := reflect.ValueOf(&d.key).Elem()
+               if !d.parse_value(keyv) {
+                       return
+               }
+
+               // get valuev as a map value or as a struct field
+               switch v.Kind() {
+               case reflect.Map:
+                       elem_type := v.Type().Elem()
+                       if !map_elem.IsValid() {
+                               map_elem = reflect.New(elem_type).Elem()
+                       } else {
+                               map_elem.Set(reflect.Zero(elem_type))
+                       }
+                       valuev = map_elem
+               case reflect.Struct:
+                       var f reflect.StructField
+                       var ok bool
+
+                       t := v.Type()
+                       for i, n := 0, t.NumField(); i < n; i++ {
+                               f = t.Field(i)
+                               tag := f.Tag.Get("bencode")
+                               if tag == "-" {
+                                       continue
+                               }
+                               if f.Anonymous {
+                                       continue
+                               }
+
+                               tag_name, _ := parse_tag(tag)
+                               if tag_name == d.key {
+                                       ok = true
+                                       break
+                               }
+
+                               if f.Name == d.key {
+                                       ok = true
+                                       break
+                               }
+
+                               if strings.EqualFold(f.Name, d.key) {
+                                       ok = true
+                                       break
+                               }
+                       }
+
+                       if ok {
+                               if f.PkgPath != "" {
+                                       panic(&UnmarshalFieldError{
+                                               Key:   d.key,
+                                               Type:  v.Type(),
+                                               Field: f,
+                                       })
+                               } else {
+                                       valuev = v.FieldByIndex(f.Index)
+                               }
+                       } else {
+                               _, ok := d.parse_value_interface()
+                               if !ok {
+                                       panic(&SyntaxError{
+                                               Offset: d.offset,
+                                               What:   errors.New("unexpected end of dict, no matching value for a given key"),
+                                       })
+                               }
+                               continue
+                       }
+               }
+
+               // now we need to actually parse it
+               if !d.parse_value(valuev) {
+                       panic(&SyntaxError{
+                               Offset: d.offset,
+                               What:   errors.New("unexpected end of dict, no matching value for a given key"),
+                       })
+               }
+
+               if v.Kind() == reflect.Map {
+                       v.SetMapIndex(keyv, valuev)
+               }
+       }
+}
+
+func (d *decoder) parse_list(v reflect.Value) {
+       switch v.Kind() {
+       case reflect.Array, reflect.Slice:
+       default:
+               panic(&UnmarshalTypeError{
+                       Value: "array",
+                       Type:  v.Type(),
+               })
+       }
+
+       i := 0
+       for {
+               if v.Kind() == reflect.Slice && i >= v.Len() {
+                       v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem())))
+               }
+
+               ok := false
+               if i < v.Len() {
+                       ok = d.parse_value(v.Index(i))
+               } else {
+                       _, ok = d.parse_value_interface()
+               }
+
+               if !ok {
+                       break
+               }
+
+               i++
+       }
+
+       if i < v.Len() {
+               if v.Kind() == reflect.Array {
+                       z := reflect.Zero(v.Type().Elem())
+                       for n := v.Len(); i < n; i++ {
+                               v.Index(i).Set(z)
+                       }
+               } else {
+                       v.SetLen(i)
+               }
+       }
+
+       if i == 0 && v.Kind() == reflect.Slice {
+               v.Set(reflect.MakeSlice(v.Type(), 0, 0))
+       }
+}
+
+func (d *decoder) read_one_value() bool {
+       b, err := d.ReadByte()
+       if err != nil {
+               panic(err)
+       }
+       if b == 'e' {
+               d.UnreadByte()
+               return false
+       } else {
+               d.offset++
+               d.buf.WriteByte(b)
+       }
+
+       switch b {
+       case 'd', 'l':
+               // read until there is nothing to read
+               for d.read_one_value() {
+               }
+               // consume 'e' as well
+               b = d.read_byte()
+               d.buf.WriteByte(b)
+       case 'i':
+               d.read_until('e')
+               d.buf.WriteString("e")
+       default:
+               if b >= '0' && b <= '9' {
+                       start := d.buf.Len() - 1
+                       d.read_until(':')
+                       length, err := strconv.ParseInt(d.buf.String()[start:], 10, 64)
+                       check_for_int_parse_error(err, d.offset-1)
+
+                       d.buf.WriteString(":")
+                       n, err := io.CopyN(&d.buf, d, length)
+                       d.offset += n
+                       if err != nil {
+                               check_for_unexpected_eof(err, d.offset)
+                               panic(&SyntaxError{
+                                       Offset: d.offset,
+                                       What:   errors.New("unexpected I/O error: " + err.Error()),
+                               })
+                       }
+                       break
+               }
+
+               // unknown value
+               panic(&SyntaxError{
+                       Offset: d.offset - 1,
+                       What:   errors.New("unknown value type (invalid bencode?)"),
+               })
+       }
+
+       return true
+
+}
+
+func (d *decoder) parse_unmarshaler(v reflect.Value) bool {
+       m, ok := v.Interface().(Unmarshaler)
+       if !ok {
+               // T doesn't work, try *T
+               if v.Kind() != reflect.Ptr && v.CanAddr() {
+                       m, ok = v.Addr().Interface().(Unmarshaler)
+                       if ok {
+                               v = v.Addr()
+                       }
+               }
+       }
+       if ok && (v.Kind() != reflect.Ptr || !v.IsNil()) {
+               if d.read_one_value() {
+                       err := m.UnmarshalBencode(d.buf.Bytes())
+                       d.buf.Reset()
+                       if err != nil {
+                               panic(&UnmarshalerError{v.Type(), err})
+                       }
+                       return true
+               }
+               d.buf.Reset()
+       }
+
+       return false
+}
+
+// returns true if there was a value and it's now stored in 'v', otherwise there
+// was an end symbol ("e") and no value was stored
+func (d *decoder) parse_value(v reflect.Value) bool {
+       // we support one level of indirection at the moment
+       if v.Kind() == reflect.Ptr {
+               // if the pointer is nil, allocate a new element of the type it
+               // points to
+               if v.IsNil() {
+                       v.Set(reflect.New(v.Type().Elem()))
+               }
+               v = v.Elem()
+       }
+
+       if d.parse_unmarshaler(v) {
+               return true
+       }
+
+       // common case: interface{}
+       if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
+               iface, _ := d.parse_value_interface()
+               v.Set(reflect.ValueOf(iface))
+               return true
+       }
+
+       b, err := d.ReadByte()
+       if err != nil {
+               panic(err)
+       }
+       d.offset++
+
+       switch b {
+       case 'e':
+               return false
+       case 'd':
+               d.parse_dict(v)
+       case 'l':
+               d.parse_list(v)
+       case 'i':
+               d.parse_int(v)
+       default:
+               if b >= '0' && b <= '9' {
+                       // string
+                       // append first digit of the length to the buffer
+                       d.buf.WriteByte(b)
+                       d.parse_string(v)
+                       break
+               }
+
+               // unknown value
+               panic(&SyntaxError{
+                       Offset: d.offset - 1,
+                       What:   errors.New("unknown value type (invalid bencode?)"),
+               })
+       }
+
+       return true
+}
+
+func (d *decoder) parse_value_interface() (interface{}, bool) {
+       b, err := d.ReadByte()
+       if err != nil {
+               panic(err)
+       }
+       d.offset++
+
+       switch b {
+       case 'e':
+               return nil, false
+       case 'd':
+               return d.parse_dict_interface(), true
+       case 'l':
+               return d.parse_list_interface(), true
+       case 'i':
+               return d.parse_int_interface(), true
+       default:
+               if b >= '0' && b <= '9' {
+                       // string
+                       // append first digit of the length to the buffer
+                       d.buf.WriteByte(b)
+                       return d.parse_string_interface(), true
+               }
+
+               // unknown value
+               panic(&SyntaxError{
+                       Offset: d.offset - 1,
+                       What:   errors.New("unknown value type (invalid bencode?)"),
+               })
+       }
+}
+
+func (d *decoder) parse_int_interface() interface{} {
+       start := d.offset - 1
+       d.read_until('e')
+       if d.buf.Len() == 0 {
+               panic(&SyntaxError{
+                       Offset: start,
+                       What:   errors.New("empty integer value"),
+               })
+       }
+
+       n, err := strconv.ParseInt(d.buf.String(), 10, 64)
+       check_for_int_parse_error(err, start)
+       d.buf.Reset()
+       return n
+}
+
+func (d *decoder) parse_string_interface() interface{} {
+       start := d.offset - 1
+
+       // read the string length first
+       d.read_until(':')
+       length, err := strconv.ParseInt(d.buf.String(), 10, 64)
+       check_for_int_parse_error(err, start)
+
+       d.buf.Reset()
+       n, err := io.CopyN(&d.buf, d, length)
+       d.offset += n
+       if err != nil {
+               check_for_unexpected_eof(err, d.offset)
+               panic(&SyntaxError{
+                       Offset: d.offset,
+                       What:   errors.New("unexpected I/O error: " + err.Error()),
+               })
+       }
+
+       s := d.buf.String()
+       d.buf.Reset()
+       return s
+}
+
+func (d *decoder) parse_dict_interface() interface{} {
+       dict := make(map[string]interface{})
+       for {
+               keyi, ok := d.parse_value_interface()
+               if !ok {
+                       break
+               }
+
+               key, ok := keyi.(string)
+               if !ok {
+                       panic(&SyntaxError{
+                               Offset: d.offset,
+                               What:   errors.New("non-string key in a dict"),
+                       })
+               }
+
+               valuei, ok := d.parse_value_interface()
+               if !ok {
+                       panic(&SyntaxError{
+                               Offset: d.offset,
+                               What:   errors.New("unexpected end of dict, no matching value for a given key"),
+                       })
+               }
+
+               dict[key] = valuei
+       }
+       return dict
+}
+
+func (d *decoder) parse_list_interface() interface{} {
+       var list []interface{}
+       for {
+               valuei, ok := d.parse_value_interface()
+               if !ok {
+                       break
+               }
+
+               list = append(list, valuei)
+       }
+       if list == nil {
+               list = make([]interface{}, 0, 0)
+       }
+       return list
+}
diff --git a/bencode/decode_test.go b/bencode/decode_test.go
new file mode 100644 (file)
index 0000000..b0714a7
--- /dev/null
@@ -0,0 +1,77 @@
+package bencode
+
+import "testing"
+import "reflect"
+
+type random_decode_test struct {
+       data     string
+       expected interface{}
+}
+
+var random_decode_tests = []random_decode_test{
+       {"i57e", int64(57)},
+       {"i-9223372036854775808e", int64(-9223372036854775808)},
+       {"5:hello", "hello"},
+       {"29:unicode test проверка", "unicode test проверка"},
+       {"d1:ai5e1:b5:helloe", map[string]interface{}{"a": int64(5), "b": "hello"}},
+       {"li5ei10ei15ei20e7:bencodee",
+               []interface{}{int64(5), int64(10), int64(15), int64(20), "bencode"}},
+       {"ldedee", []interface{}{map[string]interface{}{}, map[string]interface{}{}}},
+       {"le", []interface{}{}},
+}
+
+func TestRandomDecode(t *testing.T) {
+       for _, test := range random_decode_tests {
+               var value interface{}
+               err := Unmarshal([]byte(test.data), &value)
+               if err != nil {
+                       t.Error(err)
+                       continue
+               }
+               if !reflect.DeepEqual(test.expected, value) {
+                       t.Errorf("got: %v (%T), expected: %v (%T)\n",
+                               value, value, test.expected, test.expected)
+               }
+       }
+}
+
+func check_error(t *testing.T, err error) {
+       if err != nil {
+               t.Error(err)
+       }
+}
+
+func assert_equal(t *testing.T, x, y interface{}) {
+       if !reflect.DeepEqual(x, y) {
+               t.Errorf("got: %v (%T), expected: %v (%T)\n", x, x, y, y)
+       }
+}
+
+type unmarshaler_int struct {
+       x int
+}
+
+func (this *unmarshaler_int) UnmarshalBencode(data []byte) error {
+       return Unmarshal(data, &this.x)
+}
+
+type unmarshaler_string struct {
+       x string
+}
+
+func (this *unmarshaler_string) UnmarshalBencode(data []byte) error {
+       this.x = string(data)
+       return nil
+}
+
+func TestUnmarshalerBencode(t *testing.T) {
+       var i unmarshaler_int
+       var ss []unmarshaler_string
+       check_error(t, Unmarshal([]byte("i71e"), &i))
+       assert_equal(t, i.x, 71)
+       check_error(t, Unmarshal([]byte("l5:hello5:fruit3:waye"), &ss))
+       assert_equal(t, ss[0].x, "5:hello")
+       assert_equal(t, ss[1].x, "5:fruit")
+       assert_equal(t, ss[2].x, "3:way")
+
+}
diff --git a/bencode/encode.go b/bencode/encode.go
new file mode 100644 (file)
index 0000000..196aa09
--- /dev/null
@@ -0,0 +1,248 @@
+package bencode
+
+import "bufio"
+import "reflect"
+import "runtime"
+import "strconv"
+import "sync"
+import "sort"
+
+func is_empty_value(v reflect.Value) bool {
+       switch v.Kind() {
+       case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+               return v.Len() == 0
+       case reflect.Bool:
+               return !v.Bool()
+       case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+               return v.Int() == 0
+       case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+               return v.Uint() == 0
+       case reflect.Float32, reflect.Float64:
+               return v.Float() == 0
+       case reflect.Interface, reflect.Ptr:
+               return v.IsNil()
+       }
+       return false
+}
+
+type encoder struct {
+       *bufio.Writer
+       scratch [64]byte
+}
+
+func (e *encoder) encode(v interface{}) (err error) {
+       defer func() {
+               if e := recover(); e != nil {
+                       if _, ok := e.(runtime.Error); ok {
+                               panic(e)
+                       }
+                       err = e.(error)
+               }
+       }()
+       e.reflect_value(reflect.ValueOf(v))
+       return e.Flush()
+}
+
+type string_values []reflect.Value
+
+func (sv string_values) Len() int           { return len(sv) }
+func (sv string_values) Swap(i, j int)      { sv[i], sv[j] = sv[j], sv[i] }
+func (sv string_values) Less(i, j int) bool { return sv.get(i) < sv.get(j) }
+func (sv string_values) get(i int) string   { return sv[i].String() }
+
+func (e *encoder) write(s []byte) {
+       _, err := e.Write(s)
+       if err != nil {
+               panic(err)
+       }
+}
+
+func (e *encoder) write_string(s string) {
+       _, err := e.WriteString(s)
+       if err != nil {
+               panic(err)
+       }
+}
+
+func (e *encoder) reflect_string(s string) {
+       b := strconv.AppendInt(e.scratch[:0], int64(len(s)), 10)
+       e.write(b)
+       e.write_string(":")
+       e.write_string(s)
+}
+
+func (e *encoder) reflect_byte_slice(s []byte) {
+       b := strconv.AppendInt(e.scratch[:0], int64(len(s)), 10)
+       e.write(b)
+       e.write_string(":")
+       e.write(s)
+}
+
+// returns true if the value implements Marshaler interface and marshaling was
+// done successfully
+func (e *encoder) reflect_marshaler(v reflect.Value) bool {
+       m, ok := v.Interface().(Marshaler)
+       if !ok {
+               // T doesn't work, try *T
+               if v.Kind() != reflect.Ptr && v.CanAddr() {
+                       m, ok = v.Addr().Interface().(Marshaler)
+                       if ok {
+                               v = v.Addr()
+                       }
+               }
+       }
+       if ok && (v.Kind() != reflect.Ptr || !v.IsNil()) {
+               data, err := m.MarshalBencode()
+               if err != nil {
+                       panic(&MarshalerError{v.Type(), err})
+               }
+               e.write(data)
+               return true
+       }
+
+       return false
+}
+
+func (e *encoder) reflect_value(v reflect.Value) {
+       if !v.IsValid() {
+               return
+       }
+
+       if e.reflect_marshaler(v) {
+               return
+       }
+
+       switch v.Kind() {
+       case reflect.Bool:
+               if v.Bool() {
+                       e.write_string("i1e")
+               } else {
+                       e.write_string("i0e")
+               }
+       case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+               b := strconv.AppendInt(e.scratch[:0], v.Int(), 10)
+               e.write_string("i")
+               e.write(b)
+               e.write_string("e")
+       case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+               b := strconv.AppendUint(e.scratch[:0], v.Uint(), 10)
+               e.write_string("i")
+               e.write(b)
+               e.write_string("e")
+       case reflect.String:
+               e.reflect_string(v.String())
+       case reflect.Struct:
+               e.write_string("d")
+               for _, ef := range encode_fields(v.Type()) {
+                       field_value := v.Field(ef.i)
+                       if ef.omit_empty && is_empty_value(field_value) {
+                               continue
+                       }
+
+                       e.reflect_string(ef.tag)
+                       e.reflect_value(field_value)
+               }
+               e.write_string("e")
+       case reflect.Map:
+               if v.Type().Key().Kind() != reflect.String {
+                       panic(&MarshalTypeError{v.Type()})
+               }
+               if v.IsNil() {
+                       e.write_string("de")
+                       break
+               }
+               e.write_string("d")
+               sv := string_values(v.MapKeys())
+               sort.Sort(sv)
+               for _, key := range sv {
+                       e.reflect_string(key.String())
+                       e.reflect_value(v.MapIndex(key))
+               }
+               e.write_string("e")
+       case reflect.Slice:
+               if v.IsNil() {
+                       e.write_string("le")
+                       break
+               }
+               if v.Type().Elem().Kind() == reflect.Uint8 {
+                       s := v.Bytes()
+                       e.reflect_byte_slice(s)
+                       break
+               }
+               fallthrough
+       case reflect.Array:
+               e.write_string("l")
+               for i, n := 0, v.Len(); i < n; i++ {
+                       e.reflect_value(v.Index(i))
+               }
+               e.write_string("e")
+       case reflect.Interface, reflect.Ptr:
+               if v.IsNil() {
+                       break
+               }
+               e.reflect_value(v.Elem())
+       default:
+               panic(&MarshalTypeError{v.Type()})
+       }
+}
+
+type encode_field struct {
+       i          int
+       tag        string
+       omit_empty bool
+}
+
+type encode_fields_sort_type []encode_field
+
+func (ef encode_fields_sort_type) Len() int           { return len(ef) }
+func (ef encode_fields_sort_type) Swap(i, j int)      { ef[i], ef[j] = ef[j], ef[i] }
+func (ef encode_fields_sort_type) Less(i, j int) bool { return ef[i].tag < ef[j].tag }
+
+var (
+       type_cache_lock     sync.RWMutex
+       encode_fields_cache = make(map[reflect.Type][]encode_field)
+)
+
+func encode_fields(t reflect.Type) []encode_field {
+       type_cache_lock.RLock()
+       fs, ok := encode_fields_cache[t]
+       type_cache_lock.RUnlock()
+       if ok {
+               return fs
+       }
+
+       type_cache_lock.Lock()
+       defer type_cache_lock.Unlock()
+       fs, ok = encode_fields_cache[t]
+       if ok {
+               return fs
+       }
+
+       for i, n := 0, t.NumField(); i < n; i++ {
+               f := t.Field(i)
+               if f.PkgPath != "" {
+                       continue
+               }
+               if f.Anonymous {
+                       continue
+               }
+               var ef encode_field
+               ef.i = i
+               ef.tag = f.Name
+
+               tv := f.Tag.Get("bencode")
+               if tv != "" {
+                       if tv == "-" {
+                               continue
+                       }
+                       name, opts := parse_tag(tv)
+                       ef.tag = name
+                       ef.omit_empty = opts.contains("omitempty")
+               }
+               fs = append(fs, ef)
+       }
+       fss := encode_fields_sort_type(fs)
+       sort.Sort(fss)
+       encode_fields_cache[t] = fs
+       return fs
+}
diff --git a/bencode/encode_test.go b/bencode/encode_test.go
new file mode 100644 (file)
index 0000000..ba19173
--- /dev/null
@@ -0,0 +1,68 @@
+package bencode
+
+import "testing"
+import "bytes"
+import "fmt"
+
+type random_encode_test struct {
+       value    interface{}
+       expected string
+}
+
+type random_struct struct {
+       ABC         int    `bencode:"abc"`
+       SkipThisOne string `bencode:"-"`
+       CDE         string
+}
+
+type dummy struct {
+       a, b, c int
+}
+
+func (d *dummy) MarshalBencode() ([]byte, error) {
+       var b bytes.Buffer
+       _, err := fmt.Fprintf(&b, "i%dei%dei%de", d.a+1, d.b+1, d.c+1)
+       if err != nil {
+               return nil, err
+       }
+       return b.Bytes(), nil
+}
+
+var random_encode_tests = []random_encode_test{
+       {int(10), "i10e"},
+       {uint(10), "i10e"},
+       {"hello, world", "12:hello, world"},
+       {true, "i1e"},
+       {false, "i0e"},
+       {int8(-8), "i-8e"},
+       {int16(-16), "i-16e"},
+       {int32(32), "i32e"},
+       {int64(-64), "i-64e"},
+       {uint8(8), "i8e"},
+       {uint16(16), "i16e"},
+       {uint32(32), "i32e"},
+       {uint64(64), "i64e"},
+       {random_struct{123, "nono", "hello"}, "d3:CDE5:hello3:abci123ee"},
+       {map[string]string{"a": "b", "c": "d"}, "d1:a1:b1:c1:de"},
+       {[]byte{1, 2, 3, 4}, "4:\x01\x02\x03\x04"},
+       {[4]byte{1, 2, 3, 4}, "li1ei2ei3ei4ee"},
+       {nil, ""},
+       {[]byte{}, "0:"},
+       {"", "0:"},
+       {[]int{}, "le"},
+       {map[string]int{}, "de"},
+       {&dummy{1, 2, 3}, "i2ei3ei4e"},
+}
+
+func TestRandomEncode(t *testing.T) {
+       for _, test := range random_encode_tests {
+               data, err := Marshal(test.value)
+               if err != nil {
+                       t.Fatal(err)
+               }
+               if !bytes.Equal(data, []byte(test.expected)) {
+                       t.Errorf("got: %s, expected: %s\n",
+                               string(data), string(test.expected))
+               }
+       }
+}
diff --git a/bencode/tags.go b/bencode/tags.go
new file mode 100644 (file)
index 0000000..0943b41
--- /dev/null
@@ -0,0 +1,34 @@
+package bencode
+
+import (
+       "strings"
+)
+
+type tag_options string
+
+func parse_tag(tag string) (string, tag_options) {
+       if idx := strings.Index(tag, ","); idx != -1 {
+               return tag[:idx], tag_options(tag[idx+1:])
+       }
+       return tag, tag_options("")
+}
+
+func (this tag_options) contains(option_name string) bool {
+       if len(this) == 0 {
+               return false
+       }
+
+       s := string(this)
+       for s != "" {
+               var next string
+               i := strings.Index(s, ",")
+               if i != -1 {
+                       s, next = s[:i], s[i+1:]
+               }
+               if s == option_name {
+                       return true
+               }
+               s = next
+       }
+       return false
+}
index f27db6a906635d1e2e69e7fa1f4315cd3064a17f..5a8b56e98a81914ea8a69b03edb74ae77807194a 100644 (file)
--- a/client.go
+++ b/client.go
@@ -1,23 +1,8 @@
-/*
-Package torrent implements a torrent client.
-
-Simple example:
-
-       c, _ := torrent.NewClient(&torrent.Config{})
-       defer c.Close()
-       t, _ := c.AddMagnet("magnet:?xt=urn:btih:ZOCMZQIPFFW7OLLMIC5HUB6BPCSDEOQU")
-       t.DownloadAll()
-       c.WaitAll()
-       log.Print("ermahgerd, torrent downloaded")
-
-
-*/
 package torrent
 
 import (
        "bufio"
        "bytes"
-       "container/heap"
        "crypto/rand"
        "crypto/sha1"
        "encoding/hex"
@@ -38,18 +23,18 @@ import (
        "syscall"
        "time"
 
-       "github.com/anacrolix/libtorgo/bencode"
-       "github.com/anacrolix/libtorgo/metainfo"
        "github.com/anacrolix/sync"
        "github.com/anacrolix/utp"
        "github.com/bradfitz/iter"
 
+       "github.com/anacrolix/torrent/bencode"
        "github.com/anacrolix/torrent/data"
        filePkg "github.com/anacrolix/torrent/data/file"
        "github.com/anacrolix/torrent/dht"
        "github.com/anacrolix/torrent/internal/pieceordering"
        "github.com/anacrolix/torrent/iplist"
        "github.com/anacrolix/torrent/logonce"
+       "github.com/anacrolix/torrent/metainfo"
        "github.com/anacrolix/torrent/mse"
        pp "github.com/anacrolix/torrent/peer_protocol"
        "github.com/anacrolix/torrent/tracker"
@@ -57,21 +42,30 @@ import (
 )
 
 var (
-       unusedDownloadedChunksCount = expvar.NewInt("unusedDownloadedChunksCount")
-       chunksDownloadedCount       = expvar.NewInt("chunksDownloadedCount")
-       peersFoundByDHT             = expvar.NewInt("peersFoundByDHT")
-       peersFoundByPEX             = expvar.NewInt("peersFoundByPEX")
-       peersFoundByTracker         = expvar.NewInt("peersFoundByTracker")
-       uploadChunksPosted          = expvar.NewInt("uploadChunksPosted")
-       unexpectedCancels           = expvar.NewInt("unexpectedCancels")
-       postedCancels               = expvar.NewInt("postedCancels")
-       duplicateConnsAvoided       = expvar.NewInt("duplicateConnsAvoided")
-       failedPieceHashes           = expvar.NewInt("failedPieceHashes")
-       unsuccessfulDials           = expvar.NewInt("unsuccessfulDials")
-       successfulDials             = expvar.NewInt("successfulDials")
-       acceptedConns               = expvar.NewInt("acceptedConns")
-       inboundConnsBlocked         = expvar.NewInt("inboundConnsBlocked")
-       peerExtensions              = expvar.NewMap("peerExtensions")
+       unwantedChunksReceived   = expvar.NewInt("chunksReceivedUnwanted")
+       unexpectedChunksReceived = expvar.NewInt("chunksReceivedUnexpected")
+       chunksReceived           = expvar.NewInt("chunksReceived")
+
+       peersFoundByDHT     = expvar.NewInt("peersFoundByDHT")
+       peersFoundByPEX     = expvar.NewInt("peersFoundByPEX")
+       peersFoundByTracker = expvar.NewInt("peersFoundByTracker")
+
+       uploadChunksPosted    = expvar.NewInt("uploadChunksPosted")
+       unexpectedCancels     = expvar.NewInt("unexpectedCancels")
+       postedCancels         = expvar.NewInt("postedCancels")
+       duplicateConnsAvoided = expvar.NewInt("duplicateConnsAvoided")
+
+       pieceHashedCorrect    = expvar.NewInt("pieceHashedCorrect")
+       pieceHashedNotCorrect = expvar.NewInt("pieceHashedNotCorrect")
+
+       unsuccessfulDials = expvar.NewInt("dialSuccessful")
+       successfulDials   = expvar.NewInt("dialUnsuccessful")
+
+       acceptUTP    = expvar.NewInt("acceptUTP")
+       acceptTCP    = expvar.NewInt("acceptTCP")
+       acceptReject = expvar.NewInt("acceptReject")
+
+       peerExtensions = expvar.NewMap("peerExtensions")
        // Count of connections to peer with same client ID.
        connsToSelf = expvar.NewInt("connsToSelf")
        // Number of completed connections to a client we're already connected with.
@@ -90,7 +84,7 @@ const (
        //   Disabled until AllowedFast is implemented
        defaultExtensionBytes = "\x00\x00\x00\x00\x00\x10\x00\x01"
 
-       socketsPerTorrent     = 40
+       socketsPerTorrent     = 80
        torrentPeersHighWater = 200
        torrentPeersLowWater  = 50
 
@@ -102,10 +96,14 @@ const (
 
        pruneInterval = 10 * time.Second
 
+       // These are our extended message IDs.
        metadataExtendedId = iota + 1 // 0 is reserved for deleting keys
        pexExtendedId
 
-       extendedHandshakeClientVersion = "go.torrent dev 20140825"
+       // Updated occasionally to when there's been some changes to client
+       // behaviour in case other clients are assuming anything of us. See also
+       // `bep20`.
+       extendedHandshakeClientVersion = "go.torrent dev 20150624"
 )
 
 // Currently doesn't really queue, but should in the future.
@@ -128,23 +126,19 @@ func (cl *Client) queueFirstHash(t *torrent, piece int) {
        cl.queuePieceCheck(t, pp.Integer(piece))
 }
 
+// Clients contain zero or more Torrents. A client manages a blocklist, the
+// TCP/UDP protocol ports, and DHT as desired.
 type Client struct {
-       noUpload        bool
-       dataDir         string
-       halfOpenLimit   int
-       peerID          [20]byte
-       listeners       []net.Listener
-       utpSock         *utp.Socket
-       disableTrackers bool
-       dHT             *dht.Server
-       disableUTP      bool
-       disableTCP      bool
-       ipBlockList     *iplist.IPList
-       bannedTorrents  map[InfoHash]struct{}
-       _configDir      string
-       config          Config
-       pruneTimer      *time.Timer
-       extensionBytes  peerExtensionBytes
+       halfOpenLimit  int
+       peerID         [20]byte
+       listeners      []net.Listener
+       utpSock        *utp.Socket
+       dHT            *dht.Server
+       ipBlockList    *iplist.IPList
+       bannedTorrents map[InfoHash]struct{}
+       config         Config
+       pruneTimer     *time.Timer
+       extensionBytes peerExtensionBytes
        // Set of addresses that have our client ID. This intentionally will
        // include ourselves if we end up trying to connect to our own address
        // through legitimate channels.
@@ -180,10 +174,8 @@ func (me *Client) PeerID() string {
 
 func (me *Client) ListenAddr() (addr net.Addr) {
        for _, l := range me.listeners {
-               if addr != nil && l.Addr().String() != addr.String() {
-                       panic("listeners exist on different addresses")
-               }
                addr = l.Addr()
+               break
        }
        return
 }
@@ -228,14 +220,14 @@ func (cl *Client) WriteStatus(_w io.Writer) {
        } else {
                fmt.Fprintln(w, "Not listening!")
        }
-       fmt.Fprintf(w, "Peer ID: %q\n", cl.peerID)
+       fmt.Fprintf(w, "Peer ID: %+q\n", cl.peerID)
        if cl.dHT != nil {
                dhtStats := cl.dHT.Stats()
-               fmt.Fprintf(w, "DHT nodes: %d (%d good)\n", dhtStats.NumNodes, dhtStats.NumGoodNodes)
-               fmt.Fprintf(w, "DHT Server ID: %x\n", cl.dHT.IDString())
-               fmt.Fprintf(w, "DHT port: %d\n", addrPort(cl.dHT.LocalAddr()))
-               fmt.Fprintf(w, "DHT announces: %d\n", cl.dHT.NumConfirmedAnnounces)
-               fmt.Fprintf(w, "Outstanding transactions: %d\n", dhtStats.NumOutstandingTransactions)
+               fmt.Fprintf(w, "DHT nodes: %d (%d good)\n", dhtStats.Nodes, dhtStats.GoodNodes)
+               fmt.Fprintf(w, "DHT Server ID: %x\n", cl.dHT.ID())
+               fmt.Fprintf(w, "DHT port: %d\n", addrPort(cl.dHT.Addr()))
+               fmt.Fprintf(w, "DHT announces: %d\n", dhtStats.ConfirmedAnnounces)
+               fmt.Fprintf(w, "Outstanding transactions: %d\n", dhtStats.OutstandingTransactions)
        }
        fmt.Fprintf(w, "# Torrents: %d\n", len(cl.torrents))
        fmt.Fprintln(w)
@@ -252,85 +244,27 @@ func (cl *Client) WriteStatus(_w io.Writer) {
                        w.WriteString("<missing metainfo>")
                }
                fmt.Fprint(w, "\n")
-               t.writeStatus(w)
+               t.writeStatus(w, cl)
                fmt.Fprintln(w)
        }
 }
 
-// Read torrent data at the given offset. Will block until it is available.
-func (cl *Client) torrentReadAt(t *torrent, off int64, p []byte) (n int, err error) {
-       cl.mu.Lock()
-       defer cl.mu.Unlock()
-       index := int(off / int64(t.usualPieceSize()))
-       // Reading outside the bounds of a file is an error.
-       if index < 0 {
-               err = os.ErrInvalid
-               return
-       }
-       if int(index) >= len(t.Pieces) {
-               err = io.EOF
-               return
-       }
-       pieceOff := pp.Integer(off % int64(t.usualPieceSize()))
-       pieceLeft := int(t.pieceLength(index) - pieceOff)
-       if pieceLeft <= 0 {
-               err = io.EOF
-               return
-       }
-       if len(p) > pieceLeft {
-               p = p[:pieceLeft]
-       }
-       if len(p) == 0 {
-               panic(len(p))
-       }
-       // TODO: ReadAt should always try to fill the buffer.
-       for {
-               avail := cl.prepareRead(t, off)
-               if avail < int64(len(p)) {
-                       p = p[:avail]
-               }
-               n, err = dataReadAt(t.data, p, off)
-               if n != 0 || err != io.ErrUnexpectedEOF {
-                       break
-               }
-               // If we reach here, the data we thought was ready, isn't. So we
-               // prepare it again, and retry.
-       }
-       return
-}
-
-// Sets priorities to download from the given offset. Returns when the piece
-// at the given offset can be read. Returns the number of bytes that are
-// immediately available from the offset.
-func (cl *Client) prepareRead(t *torrent, off int64) (n int64) {
-       index := int(off / int64(t.usualPieceSize()))
-       // Reading outside the bounds of a file is an error.
-       if index < 0 || index >= t.numPieces() {
-               return
-       }
-       piece := t.Pieces[index]
-       cl.readRaisePiecePriorities(t, off)
-       for !t.pieceComplete(index) && !t.isClosed() {
-               // This is to prevent being starved if a piece is dropped before we
-               // can read it.
-               cl.readRaisePiecePriorities(t, off)
-               piece.Event.Wait()
-       }
-       return t.Info.Piece(index).Length() - off%t.Info.PieceLength
-}
-
-func (T Torrent) prepareRead(off int64) (avail int64) {
-       T.cl.mu.Lock()
-       defer T.cl.mu.Unlock()
-       return T.cl.prepareRead(T.torrent, off)
-}
-
-// Data implements a streaming interface that's more efficient than ReadAt.
+// A Data that implements this has a streaming interface that should be
+// preferred over ReadAt. For example, the data is stored in blocks on the
+// network and have a fixed cost to open.
 type SectionOpener interface {
+       // Open a ReadCloser at the given offset into torrent data. n is how many
+       // bytes we intend to read.
        OpenSection(off, n int64) (io.ReadCloser, error)
 }
 
 func dataReadAt(d data.Data, b []byte, off int64) (n int, err error) {
+       // defer func() {
+       //      if err == io.ErrUnexpectedEOF && n != 0 {
+       //              err = nil
+       //      }
+       // }()
+       // log.Println("data read at", len(b), off)
 again:
        if ra, ok := d.(io.ReaderAt); ok {
                return ra.ReadAt(b, off)
@@ -353,34 +287,68 @@ again:
 
 // Calculates the number of pieces to set to Readahead priority, after the
 // Now, and Next pieces.
-func readaheadPieces(readahead, pieceLength int64) int {
-       return int((readahead+pieceLength-1)/pieceLength - 1)
+func readaheadPieces(readahead, pieceLength int64) (ret int) {
+       // Expand the readahead to fit any partial pieces. Subtract 1 for the
+       // "next" piece that is assigned.
+       ret = int((readahead+pieceLength-1)/pieceLength - 1)
+       // Lengthen the "readahead tail" to smooth blockiness that occurs when the
+       // piece length is much larger than the readahead.
+       if ret < 2 {
+               ret++
+       }
+       return
 }
 
-func (cl *Client) readRaisePiecePriorities(t *torrent, off int64) {
+func (cl *Client) readRaisePiecePriorities(t *torrent, off, readaheadBytes int64) {
        index := int(off / int64(t.usualPieceSize()))
-       cl.raisePiecePriority(t, index, piecePriorityNow)
+       cl.raisePiecePriority(t, index, PiecePriorityNow)
        index++
        if index >= t.numPieces() {
                return
        }
-       cl.raisePiecePriority(t, index, piecePriorityNext)
-       for range iter.N(readaheadPieces(5*1024*1024, t.Info.PieceLength)) {
+       cl.raisePiecePriority(t, index, PiecePriorityNext)
+       for range iter.N(readaheadPieces(readaheadBytes, t.Info.PieceLength)) {
                index++
                if index >= t.numPieces() {
                        break
                }
-               cl.raisePiecePriority(t, index, piecePriorityReadahead)
+               cl.raisePiecePriority(t, index, PiecePriorityReadahead)
+       }
+}
+
+func (cl *Client) addUrgentRequests(t *torrent, off int64, n int) {
+       for n > 0 {
+               req, ok := t.offsetRequest(off)
+               if !ok {
+                       break
+               }
+               if _, ok := t.urgent[req]; !ok && !t.haveChunk(req) {
+                       if t.urgent == nil {
+                               t.urgent = make(map[request]struct{}, (n+chunkSize-1)/chunkSize)
+                       }
+                       t.urgent[req] = struct{}{}
+                       cl.event.Broadcast() // Why?
+                       index := int(req.Index)
+                       cl.queueFirstHash(t, index)
+                       cl.pieceChanged(t, index)
+               }
+               reqOff := t.requestOffset(req)
+               n1 := req.Length - pp.Integer(off-reqOff)
+               off += int64(n1)
+               n -= int(n1)
        }
+       // log.Print(t.urgent)
 }
 
 func (cl *Client) configDir() string {
-       if cl._configDir == "" {
+       if cl.config.ConfigDir == "" {
                return filepath.Join(os.Getenv("HOME"), ".config/torrent")
        }
-       return cl._configDir
+       return cl.config.ConfigDir
 }
 
+// The directory where the Client expects to find and store configuration
+// data. Defaults to $HOME/.config/torrent.
 func (cl *Client) ConfigDir() string {
        return cl.configDir()
 }
@@ -391,7 +359,6 @@ func (t *torrent) connPendPiece(c *connection, piece int) {
 
 func (cl *Client) raisePiecePriority(t *torrent, piece int, priority piecePriority) {
        if t.Pieces[piece].Priority < priority {
-               cl.event.Broadcast()
                cl.prioritizePiece(t, piece, priority)
        }
 }
@@ -400,9 +367,14 @@ func (cl *Client) prioritizePiece(t *torrent, piece int, priority piecePriority)
        if t.havePiece(piece) {
                return
        }
-       cl.queueFirstHash(t, piece)
-       t.Pieces[piece].Priority = priority
-       cl.pieceChanged(t, piece)
+       if priority != PiecePriorityNone {
+               cl.queueFirstHash(t, piece)
+       }
+       p := t.Pieces[piece]
+       if p.Priority != priority {
+               p.Priority = priority
+               cl.pieceChanged(t, piece)
+       }
 }
 
 func (cl *Client) setEnvBlocklist() (err error) {
@@ -419,33 +391,7 @@ func (cl *Client) setEnvBlocklist() (err error) {
                return
        }
        defer f.Close()
-       var ranges []iplist.Range
-       uniqStrs := make(map[string]string)
-       scanner := bufio.NewScanner(f)
-       lineNum := 1
-       for scanner.Scan() {
-               r, ok, lineErr := iplist.ParseBlocklistP2PLine(scanner.Bytes())
-               if lineErr != nil {
-                       err = fmt.Errorf("error reading torrent blocklist line %d: %s", lineNum, lineErr)
-                       return
-               }
-               lineNum++
-               if !ok {
-                       continue
-               }
-               if s, ok := uniqStrs[r.Description]; ok {
-                       r.Description = s
-               } else {
-                       uniqStrs[r.Description] = r.Description
-               }
-               ranges = append(ranges, r)
-       }
-       err = scanner.Err()
-       if err != nil {
-               err = fmt.Errorf("error reading torrent blocklist: %s", err)
-               return
-       }
-       cl.ipBlockList = iplist.New(ranges)
+       cl.ipBlockList, err = iplist.NewFromReader(f)
        return
 }
 
@@ -485,21 +431,20 @@ func (cl *Client) initBannedTorrents() error {
        return nil
 }
 
-// Creates a new client. Clients contain zero or more Torrents.
+// Creates a new client.
 func NewClient(cfg *Config) (cl *Client, err error) {
        if cfg == nil {
                cfg = &Config{}
        }
 
+       defer func() {
+               if err != nil {
+                       cl = nil
+               }
+       }()
        cl = &Client{
-               noUpload:        cfg.NoUpload,
-               disableTrackers: cfg.DisableTrackers,
-               halfOpenLimit:   socketsPerTorrent,
-               dataDir:         cfg.DataDir,
-               disableUTP:      cfg.DisableUTP,
-               disableTCP:      cfg.DisableTCP,
-               _configDir:      cfg.ConfigDir,
-               config:          *cfg,
+               halfOpenLimit: socketsPerTorrent,
+               config:        *cfg,
                torrentDataOpener: func(md *metainfo.Info) data.Data {
                        return filePkg.TorrentData(md, cfg.DataDir)
                },
@@ -542,13 +487,11 @@ func NewClient(cfg *Config) (cl *Client, err error) {
                        return addr.String()
                }
                if cfg.ListenAddr == "" {
-                       // IPv6 isn't well supported with blocklists, or with trackers and
-                       // DHT.
-                       return "0.0.0.0:50007"
+                       return ":50007"
                }
                return cfg.ListenAddr
        }
-       if !cl.disableTCP {
+       if !cl.config.DisableTCP {
                var l net.Listener
                l, err = net.Listen("tcp", listenAddr())
                if err != nil {
@@ -557,7 +500,7 @@ func NewClient(cfg *Config) (cl *Client, err error) {
                cl.listeners = append(cl.listeners, l)
                go cl.acceptConnections(l, false)
        }
-       if !cl.disableUTP {
+       if !cl.config.DisableUTP {
                cl.utpSock, err = utp.NewSocket(listenAddr())
                if err != nil {
                        return
@@ -568,7 +511,9 @@ func NewClient(cfg *Config) (cl *Client, err error) {
        if !cfg.NoDHT {
                dhtCfg := cfg.DHTConfig
                if dhtCfg == nil {
-                       dhtCfg = &dht.ServerConfig{}
+                       dhtCfg = &dht.ServerConfig{
+                               IPBlocklist: cl.ipBlockList,
+                       }
                }
                if dhtCfg.Addr == "" {
                        dhtCfg.Addr = listenAddr()
@@ -577,9 +522,6 @@ func NewClient(cfg *Config) (cl *Client, err error) {
                        dhtCfg.Conn = cl.utpSock.PacketConn()
                }
                cl.dHT, err = dht.NewServer(dhtCfg)
-               if cl.ipBlockList != nil {
-                       cl.dHT.SetIPBlockList(cl.ipBlockList)
-               }
                if err != nil {
                        return
                }
@@ -618,13 +560,13 @@ func (cl *Client) ipBlockRange(ip net.IP) (r *iplist.Range) {
        if cl.ipBlockList == nil {
                return
        }
-       ip = ip.To4()
-       if ip == nil {
-               log.Printf("saw non-IPv4 address")
+       ip4 := ip.To4()
+       if ip4 == nil {
+               log.Printf("blocking non-IPv4 address: %s", ip)
                r = &ipv6BlockRange
                return
        }
-       r = cl.ipBlockList.Lookup(ip)
+       r = cl.ipBlockList.Lookup(ip4)
        return
 }
 
@@ -659,13 +601,17 @@ func (cl *Client) acceptConnections(l net.Listener, utp bool) {
                        log.Print(err)
                        return
                }
-               acceptedConns.Add(1)
+               if utp {
+                       acceptUTP.Add(1)
+               } else {
+                       acceptTCP.Add(1)
+               }
                cl.mu.RLock()
                doppleganger := cl.dopplegangerAddr(conn.RemoteAddr().String())
                blockRange := cl.ipBlockRange(AddrIP(conn.RemoteAddr()))
                cl.mu.RUnlock()
                if blockRange != nil || doppleganger {
-                       inboundConnsBlocked.Add(1)
+                       acceptReject.Add(1)
                        // log.Printf("inbound connection from %s blocked by %s", conn.RemoteAddr(), blockRange)
                        conn.Close()
                        continue
@@ -734,6 +680,9 @@ func doDial(dial func(addr string, t *torrent) (net.Conn, error), ch chan dialRe
                        return
                }
        }
+       if utp && err.Error() == "timed out waiting for ack" {
+               return
+       }
        if err != nil {
                log.Printf("error dialing %s: %s", addr, err)
                return
@@ -773,7 +722,10 @@ func (me *Client) initiateConn(peer Peer, t *torrent) {
 }
 
 func (me *Client) dialTimeout(t *torrent) time.Duration {
-       return reducedDialTimeout(nominalDialTimeout, me.halfOpenLimit, len(t.Peers))
+       me.mu.Lock()
+       pendingPeers := len(t.Peers)
+       me.mu.Unlock()
+       return reducedDialTimeout(nominalDialTimeout, me.halfOpenLimit, pendingPeers)
 }
 
 func (me *Client) dialTCP(addr string, t *torrent) (c net.Conn, err error) {
@@ -793,17 +745,17 @@ func (me *Client) dial(addr string, t *torrent) (conn net.Conn, utp bool) {
        // Initiate connections via TCP and UTP simultaneously. Use the first one
        // that succeeds.
        left := 0
-       if !me.disableUTP {
+       if !me.config.DisableUTP {
                left++
        }
-       if !me.disableTCP {
+       if !me.config.DisableTCP {
                left++
        }
        resCh := make(chan dialResult, left)
-       if !me.disableUTP {
+       if !me.config.DisableUTP {
                go doDial(me.dialUTP, resCh, true, addr, t)
        }
-       if !me.disableTCP {
+       if !me.config.DisableTCP {
                go doDial(me.dialTCP, resCh, false, addr, t)
        }
        var res dialResult
@@ -858,7 +810,7 @@ func (me *Client) establishOutgoingConn(t *torrent, addr string) (c *connection,
        if nc == nil {
                return
        }
-       c, err = handshakesConnection(nc, true, utp)
+       c, err = handshakesConnection(nc, !me.config.DisableEncryption, utp)
        if err != nil {
                nc.Close()
                return
@@ -866,6 +818,12 @@ func (me *Client) establishOutgoingConn(t *torrent, addr string) (c *connection,
                return
        }
        nc.Close()
+       // Try again without encryption, using whichever protocol type worked last
+       // time.
+       if me.config.DisableEncryption {
+               // We already tried without encryption.
+               return
+       }
        if utp {
                nc, err = me.dialUTP(addr, t)
        } else {
@@ -1110,16 +1068,18 @@ func (cl *Client) receiveHandshakes(c *connection) (t *torrent, err error) {
        cl.mu.Lock()
        skeys := cl.receiveSkeys()
        cl.mu.Unlock()
-       c.rw, c.encrypted, err = maybeReceiveEncryptedHandshake(c.rw, skeys)
-       if err != nil {
-               if err == mse.ErrNoSecretKeyMatch {
-                       err = nil
+       if !cl.config.DisableEncryption {
+               c.rw, c.encrypted, err = maybeReceiveEncryptedHandshake(c.rw, skeys)
+               if err != nil {
+                       if err == mse.ErrNoSecretKeyMatch {
+                               err = nil
+                       }
+                       return
                }
-               return
        }
        ih, ok, err := cl.connBTHandshake(c, nil)
        if err != nil {
-               fmt.Errorf("error during bt handshake: %s", err)
+               err = fmt.Errorf("error during bt handshake: %s", err)
                return
        }
        if !ok {
@@ -1217,15 +1177,10 @@ func (me *Client) sendInitialMessages(conn *connection, torrent *torrent) {
                                        }(),
                                        "v": extendedHandshakeClientVersion,
                                        // No upload queue is implemented yet.
-                                       "reqq": func() int {
-                                               if me.noUpload {
-                                                       // No need to look strange if it costs us nothing.
-                                                       return 250
-                                               } else {
-                                                       return 1
-                                               }
-                                       }(),
-                                       "e": 1, // Awwww yeah
+                                       "reqq": 64,
+                               }
+                               if !me.config.DisableEncryption {
+                                       d["e"] = 1
                                }
                                if torrent.metadataSizeKnown() {
                                        d["metadata_size"] = torrent.metadataSize()
@@ -1261,7 +1216,7 @@ func (me *Client) sendInitialMessages(conn *connection, torrent *torrent) {
        if conn.PeerExtensionBytes.SupportsDHT() && me.extensionBytes.SupportsDHT() && me.dHT != nil {
                conn.Post(pp.Message{
                        Type: pp.Port,
-                       Port: uint16(AddrPort(me.dHT.LocalAddr())),
+                       Port: uint16(AddrPort(me.dHT.Addr())),
                })
        }
 }
@@ -1318,11 +1273,12 @@ func (cl *Client) connCancel(t *torrent, cn *connection, r request) (ok bool) {
        return
 }
 
-func (cl *Client) connDeleteRequest(t *torrent, cn *connection, r request) {
+func (cl *Client) connDeleteRequest(t *torrent, cn *connection, r request) bool {
        if !cn.RequestPending(r) {
-               return
+               return false
        }
        delete(cn.Requests, r)
+       return true
 }
 
 func (cl *Client) requestPendingMetadata(t *torrent, c *connection) {
@@ -1444,6 +1400,54 @@ func (cl *Client) peerHasAll(t *torrent, cn *connection) {
        }
 }
 
+func (me *Client) upload(t *torrent, c *connection) {
+       if me.config.NoUpload {
+               return
+       }
+       if !c.PeerInterested {
+               return
+       }
+       if !me.seeding(t) && !t.connHasWantedPieces(c) {
+               return
+       }
+another:
+       for c.chunksSent < c.UsefulChunksReceived+6 {
+               c.Unchoke()
+               for r := range c.PeerRequests {
+                       err := me.sendChunk(t, c, r)
+                       if err != nil {
+                               log.Printf("error sending chunk to peer: %s", err)
+                       }
+                       delete(c.PeerRequests, r)
+                       goto another
+               }
+               return
+       }
+       c.Choke()
+}
+
+func (me *Client) sendChunk(t *torrent, c *connection, r request) error {
+       b := make([]byte, r.Length)
+       p := t.Info.Piece(int(r.Index))
+       n, err := dataReadAt(t.data, b, p.Offset()+int64(r.Begin))
+       if err != nil {
+               return err
+       }
+       if n != len(b) {
+               log.Fatal(b)
+       }
+       c.Post(pp.Message{
+               Type:  pp.Piece,
+               Index: r.Index,
+               Begin: r.Begin,
+               Piece: b,
+       })
+       uploadChunksPosted.Add(1)
+       c.chunksSent++
+       c.lastChunkSent = time.Now()
+       return nil
+}
+
 // Processes incoming bittorrent messages. The client lock is held upon entry
 // and exit.
 func (me *Client) connectionLoop(t *torrent, c *connection) error {
@@ -1488,42 +1492,25 @@ func (me *Client) connectionLoop(t *torrent, c *connection) error {
                        me.peerUnchoked(t, c)
                case pp.Interested:
                        c.PeerInterested = true
-                       // TODO: This should be done from a dedicated unchoking routine.
-                       if me.noUpload {
-                               break
-                       }
-                       c.Unchoke()
+                       me.upload(t, c)
                case pp.NotInterested:
                        c.PeerInterested = false
                        c.Choke()
                case pp.Have:
                        me.peerGotPiece(t, c, int(msg.Index))
                case pp.Request:
-                       if me.noUpload {
+                       if c.Choked {
+                               break
+                       }
+                       if !c.PeerInterested {
+                               err = errors.New("peer sent request but isn't interested")
                                break
                        }
                        if c.PeerRequests == nil {
                                c.PeerRequests = make(map[request]struct{}, maxRequests)
                        }
-                       request := newRequest(msg.Index, msg.Begin, msg.Length)
-                       // TODO: Requests should be satisfied from a dedicated upload
-                       // routine.
-                       // c.PeerRequests[request] = struct{}{}
-                       p := make([]byte, msg.Length)
-                       n, err := dataReadAt(t.data, p, int64(t.pieceLength(0))*int64(msg.Index)+int64(msg.Begin))
-                       if err != nil {
-                               return fmt.Errorf("reading t data to serve request %q: %s", request, err)
-                       }
-                       if n != int(msg.Length) {
-                               return fmt.Errorf("bad request: %v", msg)
-                       }
-                       c.Post(pp.Message{
-                               Type:  pp.Piece,
-                               Index: msg.Index,
-                               Begin: msg.Begin,
-                               Piece: p,
-                       })
-                       uploadChunksPosted.Add(1)
+                       c.PeerRequests[newRequest(msg.Index, msg.Begin, msg.Length)] = struct{}{}
+                       me.upload(t, c)
                case pp.Cancel:
                        req := newRequest(msg.Index, msg.Begin, msg.Length)
                        if !c.PeerCancel(req) {
@@ -1693,27 +1680,31 @@ func (me *Client) connectionLoop(t *torrent, c *connection) error {
        }
 }
 
-func (me *Client) dropConnection(torrent *torrent, conn *connection) {
-       me.event.Broadcast()
-       for r := range conn.Requests {
-               me.connDeleteRequest(torrent, conn, r)
-       }
-       conn.Close()
-       for i0, c := range torrent.Conns {
-               if c != conn {
+// Returns true if connection is removed from torrent.Conns.
+func (me *Client) deleteConnection(t *torrent, c *connection) bool {
+       for i0, _c := range t.Conns {
+               if _c != c {
                        continue
                }
-               i1 := len(torrent.Conns) - 1
+               i1 := len(t.Conns) - 1
                if i0 != i1 {
-                       torrent.Conns[i0] = torrent.Conns[i1]
+                       t.Conns[i0] = t.Conns[i1]
                }
-               torrent.Conns = torrent.Conns[:i1]
-               me.openNewConns(torrent)
-               return
+               t.Conns = t.Conns[:i1]
+               return true
+       }
+       return false
+}
+
+func (me *Client) dropConnection(t *torrent, c *connection) {
+       me.event.Broadcast()
+       c.Close()
+       if me.deleteConnection(t, c) {
+               me.openNewConns(t)
        }
-       panic("connection not found")
 }
 
+// Returns true if the connection is added.
 func (me *Client) addConnection(t *torrent, c *connection) bool {
        if me.stopped() {
                return false
@@ -1733,13 +1724,19 @@ func (me *Client) addConnection(t *torrent, c *connection) bool {
                        return false
                }
        }
-       t.Conns = append(t.Conns, c)
-       // TODO: This should probably be done by a routine that kills off bad
-       // connections, and extra connections killed here instead.
-       if len(t.Conns) > socketsPerTorrent {
-               wcs := t.worstConnsHeap()
-               heap.Pop(wcs).(*connection).Close()
+       if len(t.Conns) >= socketsPerTorrent {
+               c := t.worstBadConn(me)
+               if c == nil {
+                       return false
+               }
+               log.Printf("%s: dropping connection to make room for new one: %s", t, c)
+               c.Close()
+               me.deleteConnection(t, c)
        }
+       if len(t.Conns) >= socketsPerTorrent {
+               panic(len(t.Conns))
+       }
+       t.Conns = append(t.Conns, c)
        return true
 }
 
@@ -1755,40 +1752,29 @@ func (t *torrent) needData() bool {
        return false
 }
 
-// TODO: I'm sure there's something here to do with seeding.
-func (t *torrent) badConn(c *connection) bool {
-       // A 30 second grace for initial messages to go through.
-       if time.Since(c.completedHandshake) < 30*time.Second {
+func (cl *Client) usefulConn(t *torrent, c *connection) bool {
+       select {
+       case <-c.closing:
                return false
+       default:
        }
        if !t.haveInfo() {
-               if !c.supportsExtension("ut_metadata") {
-                       return true
-               }
-               if time.Since(c.completedHandshake) > 2*time.Minute {
-                       return true
-               }
+               return c.supportsExtension("ut_metadata")
        }
-       return !t.connHasWantedPieces(c)
-}
-
-func (t *torrent) numGoodConns() (num int) {
-       for _, c := range t.Conns {
-               if !t.badConn(c) {
-                       num++
-               }
+       if cl.seeding(t) {
+               return c.PeerInterested
        }
-       return
+       return t.connHasWantedPieces(c)
 }
 
 func (me *Client) wantConns(t *torrent) bool {
-       if me.noUpload && !t.needData() {
+       if !me.seeding(t) && !t.needData() {
                return false
        }
-       if t.numGoodConns() >= socketsPerTorrent {
-               return false
+       if len(t.Conns) < socketsPerTorrent {
+               return true
        }
-       return true
+       return t.worstBadConn(me) != nil
 }
 
 func (me *Client) openNewConns(t *torrent) {
@@ -1825,6 +1811,10 @@ func (me *Client) addPeers(t *torrent, peers []Peer) {
                if me.ipBlockRange(p.IP) != nil {
                        continue
                }
+               if p.Port == 0 {
+                       log.Printf("got bad peer: %v", p)
+                       continue
+               }
                t.addPeer(p)
        }
        me.openNewConns(t)
@@ -1865,7 +1855,7 @@ func (cl *Client) startTorrent(t *torrent) {
        }
        // If the client intends to upload, it needs to know what state pieces are
        // in.
-       if !cl.noUpload {
+       if !cl.config.NoUpload {
                // Queue all pieces for hashing. This is done sequentially to avoid
                // spamming goroutines.
                for _, p := range t.Pieces {
@@ -1886,6 +1876,9 @@ func (cl *Client) setStorage(t *torrent, td data.Data) (err error) {
        if err != nil {
                return
        }
+       for index := range iter.N(t.numPieces()) {
+               cl.pieceChanged(t, index)
+       }
        cl.startTorrent(t)
        return
 }
@@ -1903,11 +1896,6 @@ func (cl *Client) setMetaData(t *torrent, md *metainfo.Info, bytes []byte) (err
                }
        }
        cl.event.Broadcast()
-       if strings.Contains(strings.ToLower(md.Name), "porn") {
-               cl.dropTorrent(t.InfoHash)
-               err = errors.New("no porn plx")
-               return
-       }
        close(t.gotMetainfo)
        td := cl.torrentDataOpener(md)
        err = cl.setStorage(t, td)
@@ -1930,7 +1918,6 @@ func newTorrent(ih InfoHash) (t *torrent, err error) {
                HalfOpen: make(map[string]struct{}),
        }
        t.wantPeers.L = &t.stateMu
-       t.GotMetainfo = t.gotMetainfo
        return
 }
 
@@ -1987,12 +1974,6 @@ func (t *torrent) addTrackers(announceList [][]string) {
        t.Trackers = newTrackers
 }
 
-// A handle to a live torrent within a Client.
-type Torrent struct {
-       cl *Client
-       *torrent
-}
-
 // Don't call this before the info is available.
 func (t *torrent) BytesCompleted() int64 {
        if !t.haveInfo() {
@@ -2001,33 +1982,6 @@ func (t *torrent) BytesCompleted() int64 {
        return t.Info.TotalLength() - t.bytesLeft()
 }
 
-func (t Torrent) NumPieces() int {
-       return t.numPieces()
-}
-
-func (t Torrent) Drop() {
-       t.cl.mu.Lock()
-       t.cl.dropTorrent(t.InfoHash)
-       t.cl.mu.Unlock()
-}
-
-// Provides access to regions of torrent data that correspond to its files.
-type File struct {
-       t      Torrent
-       path   string
-       offset int64
-       length int64
-       fi     metainfo.FileInfo
-}
-
-func (f File) FileInfo() metainfo.FileInfo {
-       return f.fi
-}
-
-func (f File) Path() string {
-       return f.path
-}
-
 // A file-like handle to some torrent data resource.
 type Handle interface {
        io.Reader
@@ -2036,119 +1990,11 @@ type Handle interface {
        io.ReaderAt
 }
 
-// Implements a Handle within a subsection of another Handle.
-type sectionHandle struct {
-       h           Handle
-       off, n, cur int64
-}
-
-func (me *sectionHandle) Seek(offset int64, whence int) (ret int64, err error) {
-       if whence == 0 {
-               offset += me.off
-       } else if whence == 2 {
-               whence = 0
-               offset += me.off + me.n
-       }
-       ret, err = me.h.Seek(offset, whence)
-       me.cur = ret
-       ret -= me.off
-       return
-}
-
-func (me *sectionHandle) Close() error {
-       return me.h.Close()
-}
-
-func (me *sectionHandle) Read(b []byte) (n int, err error) {
-       max := me.off + me.n - me.cur
-       if int64(len(b)) > max {
-               b = b[:max]
-       }
-       n, err = me.h.Read(b)
-       me.cur += int64(n)
-       if err != nil {
-               return
-       }
-       if me.cur == me.off+me.n {
-               err = io.EOF
-       }
-       return
-}
-
-func (me *sectionHandle) ReadAt(b []byte, off int64) (n int, err error) {
-       if off >= me.n {
-               err = io.EOF
-               return
-       }
-       if int64(len(b)) >= me.n-off {
-               b = b[:me.n-off]
-       }
-       return me.h.ReadAt(b, me.off+off)
-}
-
-func (f File) Open() (h Handle, err error) {
-       h = f.t.NewReadHandle()
-       _, err = h.Seek(f.offset, os.SEEK_SET)
-       if err != nil {
-               h.Close()
-               return
-       }
-       h = &sectionHandle{h, f.offset, f.Length(), f.offset}
-       return
-}
-
-func (f File) ReadAt(p []byte, off int64) (n int, err error) {
-       maxLen := f.length - off
-       if int64(len(p)) > maxLen {
-               p = p[:maxLen]
-       }
-       return f.t.ReadAt(p, off+f.offset)
-}
-
-func (f *File) Length() int64 {
-       return f.length
-}
-
-type FilePieceState struct {
-       Length int64
-       State  byte
-}
-
-func (f *File) Progress() (ret []FilePieceState) {
-       pieceSize := int64(f.t.usualPieceSize())
-       off := f.offset % pieceSize
-       remaining := f.length
-       for i := int(f.offset / pieceSize); ; i++ {
-               if remaining == 0 {
-                       break
-               }
-               len1 := pieceSize - off
-               if len1 > remaining {
-                       len1 = remaining
-               }
-               ret = append(ret, FilePieceState{len1, f.t.pieceStatusChar(i)})
-               off = 0
-               remaining -= len1
-       }
-       return
-}
-
-func (f *File) PrioritizeRegion(off, len int64) {
-       if off < 0 || off >= f.length {
-               return
-       }
-       if off+len > f.length {
-               len = f.length - off
-       }
-       off += f.offset
-       f.t.SetRegionPriority(off, len)
-}
-
 // Returns handles to the files in the torrent. This requires the metainfo is
 // available first.
 func (t Torrent) Files() (ret []File) {
        t.cl.mu.Lock()
-       info := t.Info
+       info := t.Info()
        t.cl.mu.Unlock()
        if info == nil {
                return
@@ -2167,12 +2013,13 @@ func (t Torrent) Files() (ret []File) {
        return
 }
 
+// Marks the pieces in the given region for download.
 func (t Torrent) SetRegionPriority(off, len int64) {
        t.cl.mu.Lock()
        defer t.cl.mu.Unlock()
        pieceSize := int64(t.usualPieceSize())
        for i := off / pieceSize; i*pieceSize < off+len; i++ {
-               t.cl.prioritizePiece(t.torrent, int(i), piecePriorityNormal)
+               t.cl.raisePiecePriority(t.torrent, int(i), PiecePriorityNormal)
        }
 }
 
@@ -2184,21 +2031,18 @@ func (t Torrent) AddPeers(pp []Peer) error {
        return nil
 }
 
-// Marks the entire torrent for download.
+// Marks the entire torrent for download. Requires the info first, see
+// GotInfo.
 func (t Torrent) DownloadAll() {
        t.cl.mu.Lock()
        defer t.cl.mu.Unlock()
        for i := range iter.N(t.numPieces()) {
-               t.cl.raisePiecePriority(t.torrent, i, piecePriorityNormal)
+               t.cl.raisePiecePriority(t.torrent, i, PiecePriorityNormal)
        }
        // Nice to have the first and last pieces sooner for various interactive
        // purposes.
-       t.cl.raisePiecePriority(t.torrent, 0, piecePriorityReadahead)
-       t.cl.raisePiecePriority(t.torrent, t.numPieces()-1, piecePriorityReadahead)
-}
-
-func (me Torrent) ReadAt(p []byte, off int64) (n int, err error) {
-       return me.cl.torrentReadAt(me.torrent, off, p)
+       t.cl.raisePiecePriority(t.torrent, 0, PiecePriorityReadahead)
+       t.cl.raisePiecePriority(t.torrent, t.numPieces()-1, PiecePriorityReadahead)
 }
 
 // Returns nil metainfo if it isn't in the cache. Checks that the retrieved
@@ -2244,8 +2088,8 @@ func TorrentSpecFromMagnetURI(uri string) (spec *TorrentSpec, err error) {
        spec = &TorrentSpec{
                Trackers:    [][]string{m.Trackers},
                DisplayName: m.DisplayName,
+               InfoHash:    m.InfoHash,
        }
-       CopyExact(&spec.InfoHash, &m.InfoHash)
        return
 }
 
@@ -2313,10 +2157,7 @@ func (cl *Client) AddTorrentSpec(spec *TorrentSpec) (T Torrent, new bool, err er
        // From this point onwards, we can consider the torrent a part of the
        // client.
        if new {
-               t.pruneTimer = time.AfterFunc(0, func() {
-                       cl.pruneConnectionsUnlocked(T.torrent)
-               })
-               if !cl.disableTrackers {
+               if !cl.config.DisableTrackers {
                        go cl.announceTorrentTrackers(T.torrent)
                }
                if cl.dHT != nil {
@@ -2326,35 +2167,6 @@ func (cl *Client) AddTorrentSpec(spec *TorrentSpec) (T Torrent, new bool, err er
        return
 }
 
-// Prunes unused connections. This is required to make space to dial for
-// replacements.
-func (cl *Client) pruneConnectionsUnlocked(t *torrent) {
-       select {
-       case <-t.ceasingNetworking:
-               return
-       case <-t.closing:
-               return
-       default:
-       }
-       cl.mu.Lock()
-       license := len(t.Conns) - (socketsPerTorrent+1)/2
-       for _, c := range t.Conns {
-               if license <= 0 {
-                       break
-               }
-               if time.Now().Sub(c.lastUsefulChunkReceived) < time.Minute {
-                       continue
-               }
-               if time.Now().Sub(c.completedHandshake) < time.Minute {
-                       continue
-               }
-               c.Close()
-               license--
-       }
-       cl.mu.Unlock()
-       t.pruneTimer.Reset(pruneInterval)
-}
-
 func (me *Client) dropTorrent(infoHash InfoHash) (err error) {
        t, ok := me.torrents[infoHash]
        if !ok {
@@ -2381,9 +2193,13 @@ func (cl *Client) waitWantPeers(t *torrent) bool {
                        return false
                default:
                }
-               if len(t.Peers) < torrentPeersLowWater && t.needData() {
+               if len(t.Peers) > torrentPeersLowWater {
+                       goto wait
+               }
+               if t.needData() || cl.seeding(t) {
                        return true
                }
+       wait:
                cl.mu.Unlock()
                t.wantPeers.Wait()
                t.stateMu.Unlock()
@@ -2392,6 +2208,20 @@ func (cl *Client) waitWantPeers(t *torrent) bool {
        }
 }
 
+// Returns whether the client should make effort to seed the torrent.
+func (cl *Client) seeding(t *torrent) bool {
+       if cl.config.NoUpload {
+               return false
+       }
+       if !cl.config.Seed {
+               return false
+       }
+       if t.needData() {
+               return false
+       }
+       return true
+}
+
 func (cl *Client) announceTorrentDHT(t *torrent, impliedPort bool) {
        for cl.waitWantPeers(t) {
                log.Printf("getting peers for %q from DHT", t)
@@ -2404,7 +2234,7 @@ func (cl *Client) announceTorrentDHT(t *torrent, impliedPort bool) {
        getPeers:
                for {
                        select {
-                       case v, ok := <-ps.Values:
+                       case v, ok := <-ps.Peers:
                                if !ok {
                                        break getPeers
                                }
@@ -2609,11 +2439,17 @@ func (me *Client) fillRequests(t *torrent, c *connection) {
                }
        }
        addRequest := func(req request) (again bool) {
+               // TODO: Couldn't this check also be done *after* the request?
                if len(c.Requests) >= 64 {
                        return false
                }
                return c.Request(req)
        }
+       for req := range t.urgent {
+               if !addRequest(req) {
+                       return
+               }
+       }
        for e := c.pieceRequestOrder.First(); e != nil; e = e.Next() {
                pieceIndex := e.Piece()
                if !c.PeerHasPiece(pieceIndex) {
@@ -2623,7 +2459,7 @@ func (me *Client) fillRequests(t *torrent, c *connection) {
                        panic("unwanted piece in connection request order")
                }
                piece := t.Pieces[pieceIndex]
-               for _, cs := range piece.shuffledPendingChunkSpecs() {
+               for _, cs := range piece.shuffledPendingChunkSpecs(t.pieceLength(pieceIndex)) {
                        r := request{pp.Integer(pieceIndex), cs}
                        if !addRequest(r) {
                                return
@@ -2649,20 +2485,22 @@ func (me *Client) replenishConnRequests(t *torrent, c *connection) {
 
 // Handle a received chunk from a peer.
 func (me *Client) downloadedChunk(t *torrent, c *connection, msg *pp.Message) error {
-       chunksDownloadedCount.Add(1)
+       chunksReceived.Add(1)
 
        req := newRequest(msg.Index, msg.Begin, pp.Integer(len(msg.Piece)))
 
        // Request has been satisfied.
-       me.connDeleteRequest(t, c, req)
-
-       defer me.replenishConnRequests(t, c)
+       if me.connDeleteRequest(t, c, req) {
+               defer me.replenishConnRequests(t, c)
+       } else {
+               unexpectedChunksReceived.Add(1)
+       }
 
        piece := t.Pieces[req.Index]
 
        // Do we actually want this chunk?
-       if _, ok := piece.PendingChunkSpecs[req.chunkSpec]; !ok || piece.Priority == piecePriorityNone {
-               unusedDownloadedChunksCount.Add(1)
+       if !t.wantChunk(req) {
+               unwantedChunksReceived.Add(1)
                c.UnwantedChunksReceived++
                return nil
        }
@@ -2670,15 +2508,21 @@ func (me *Client) downloadedChunk(t *torrent, c *connection, msg *pp.Message) er
        c.UsefulChunksReceived++
        c.lastUsefulChunkReceived = time.Now()
 
+       me.upload(t, c)
+
        // Write the chunk out.
        err := t.writeChunk(int(msg.Index), int64(msg.Begin), msg.Piece)
        if err != nil {
-               return fmt.Errorf("error writing chunk: %s", err)
+               log.Printf("error writing chunk: %s", err)
+               return nil
        }
 
+       // log.Println("got chunk", req)
+       piece.Event.Broadcast()
        // Record that we have the chunk.
-       delete(piece.PendingChunkSpecs, req.chunkSpec)
-       if len(piece.PendingChunkSpecs) == 0 {
+       piece.unpendChunkIndex(chunkIndex(req.chunkSpec))
+       delete(t.urgent, req)
+       if piece.numPendingChunks() == 0 {
                for _, c := range t.Conns {
                        c.pieceRequestOrder.DeletePiece(int(req.Index))
                }
@@ -2697,35 +2541,43 @@ func (me *Client) downloadedChunk(t *torrent, c *connection, msg *pp.Message) er
 
 func (me *Client) pieceHashed(t *torrent, piece pp.Integer, correct bool) {
        p := t.Pieces[piece]
-       if p.EverHashed && !correct {
-               log.Printf("%s: piece %d failed hash", t, piece)
-               failedPieceHashes.Add(1)
+       if p.EverHashed {
+               if correct {
+                       pieceHashedCorrect.Add(1)
+               } else {
+                       log.Printf("%s: piece %d failed hash", t, piece)
+                       pieceHashedNotCorrect.Add(1)
+               }
        }
        p.EverHashed = true
        if correct {
-               if sd, ok := t.data.(StatefulData); ok {
-                       err := sd.PieceCompleted(int(piece))
-                       if err != nil {
-                               log.Printf("error completing piece: %s", err)
-                               correct = false
-                       }
+               err := t.data.PieceCompleted(int(piece))
+               if err != nil {
+                       log.Printf("error completing piece: %s", err)
+                       correct = false
                }
        }
        me.pieceChanged(t, int(piece))
 }
 
+// TODO: Check this isn't called more than once for each piece being correct.
 func (me *Client) pieceChanged(t *torrent, piece int) {
        correct := t.pieceComplete(piece)
        p := t.Pieces[piece]
+       defer p.Event.Broadcast()
        if correct {
-               p.Priority = piecePriorityNone
+               p.Priority = PiecePriorityNone
                p.PendingChunkSpecs = nil
-               p.Event.Broadcast()
+               for req := range t.urgent {
+                       if int(req.Index) == piece {
+                               delete(t.urgent, req)
+                       }
+               }
        } else {
-               if len(p.PendingChunkSpecs) == 0 {
+               if p.numPendingChunks() == 0 {
                        t.pendAllChunkSpecs(int(piece))
                }
-               if p.Priority != piecePriorityNone {
+               if t.wantPiece(piece) {
                        me.openNewConns(t)
                }
        }
@@ -2738,7 +2590,7 @@ func (me *Client) pieceChanged(t *torrent, piece int) {
                        // TODO: Cancel requests for this piece.
                        for r := range conn.Requests {
                                if int(r.Index) == piece {
-                                       panic("wat")
+                                       conn.Cancel(r)
                                }
                        }
                        conn.pieceRequestOrder.DeletePiece(int(piece))
@@ -2748,9 +2600,6 @@ func (me *Client) pieceChanged(t *torrent, piece int) {
                        me.replenishConnRequests(t, conn)
                }
        }
-       if t.haveAllPieces() && me.noUpload {
-               t.ceaseNetworking()
-       }
        me.event.Broadcast()
 }
 
index 08dbc80cd8b655647a35be4fbf58ec1e1b481ebc..3cd1f0ec398a81af7ea37f0d2d33ba590db3b2a7 100644 (file)
@@ -8,16 +8,21 @@ import (
        "log"
        "net"
        "os"
+       "path/filepath"
        "testing"
        "time"
 
-       "github.com/anacrolix/libtorgo/bencode"
+       _ "github.com/anacrolix/envpprof"
        "github.com/anacrolix/utp"
        "github.com/bradfitz/iter"
+       "github.com/stretchr/testify/assert"
        "gopkg.in/check.v1"
 
+       "github.com/anacrolix/torrent/bencode"
+       "github.com/anacrolix/torrent/data"
        "github.com/anacrolix/torrent/data/blob"
        "github.com/anacrolix/torrent/internal/testutil"
+       "github.com/anacrolix/torrent/metainfo"
        "github.com/anacrolix/torrent/util"
 )
 
@@ -31,6 +36,7 @@ var TestingConfig = Config{
        DisableTrackers:      true,
        NoDefaultBlocklist:   true,
        DisableMetainfoCache: true,
+       DataDir:              filepath.Join(os.TempDir(), "anacrolix"),
 }
 
 func TestClientDefault(t *testing.T) {
@@ -96,16 +102,12 @@ func TestTorrentInitialState(t *testing.T) {
        }
        p := tor.Pieces[0]
        tor.pendAllChunkSpecs(0)
-       if len(p.PendingChunkSpecs) != 1 {
+       if p.numPendingChunks() != 1 {
                t.Fatalf("should only be 1 chunk: %v", p.PendingChunkSpecs)
        }
        // TODO: Set chunkSize to 2, to test odd/even silliness.
-       if false {
-               if _, ok := p.PendingChunkSpecs[chunkSpec{
-                       Length: 13,
-               }]; !ok {
-                       t.Fatal("pending chunk spec is incorrect")
-               }
+       if chunkIndexSpec(0, tor.pieceLength(0)).Length != 5 {
+               t.Fatal("pending chunk spec is incorrect")
        }
 }
 
@@ -177,7 +179,9 @@ func TestUTPRawConn(t *testing.T) {
        defer peer.Close()
 
        msgsReceived := 0
-       const N = 5000 // How many messages to send.
+       // How many messages to send. I've set this to double the channel buffer
+       // size in the raw packetConn.
+       const N = 200
        readerStopped := make(chan struct{})
        // The reader goroutine.
        go func() {
@@ -248,6 +252,7 @@ func TestClientTransfer(t *testing.T) {
        greetingTempDir, mi := testutil.GreetingTestTorrent()
        defer os.RemoveAll(greetingTempDir)
        cfg := TestingConfig
+       cfg.Seed = true
        cfg.DataDir = greetingTempDir
        seeder, err := NewClient(&cfg)
        if err != nil {
@@ -273,9 +278,11 @@ func TestClientTransfer(t *testing.T) {
                        Port: util.AddrPort(seeder.ListenAddr()),
                },
        })
-       _greeting, err := ioutil.ReadAll(io.NewSectionReader(leecherGreeting, 0, leecherGreeting.Length()))
+       r := leecherGreeting.NewReader()
+       defer r.Close()
+       _greeting, err := ioutil.ReadAll(r)
        if err != nil {
-               t.Fatal(err)
+               t.Fatalf("%q %s", string(_greeting), err)
        }
        greeting := string(_greeting)
        if greeting != testutil.GreetingFileContents {
@@ -289,15 +296,14 @@ func TestReadaheadPieces(t *testing.T) {
                readaheadPieces             int
        }{
                {5 * 1024 * 1024, 256 * 1024, 19},
-               {5 * 1024 * 1024, 5 * 1024 * 1024, 0},
-               {5*1024*1024 - 1, 5 * 1024 * 1024, 0},
-               {5 * 1024 * 1024, 5*1024*1024 - 1, 1},
-               {0, 5 * 1024 * 1024, -1},
+               {5 * 1024 * 1024, 5 * 1024 * 1024, 1},
+               {5*1024*1024 - 1, 5 * 1024 * 1024, 1},
+               {5 * 1024 * 1024, 5*1024*1024 - 1, 2},
+               {0, 5 * 1024 * 1024, 0},
                {5 * 1024 * 1024, 1048576, 4},
        } {
-               if readaheadPieces(case_.readaheadBytes, case_.pieceLength) != case_.readaheadPieces {
-                       t.Fatalf("case failed: %v", case_)
-               }
+               pieces := readaheadPieces(case_.readaheadBytes, case_.pieceLength)
+               assert.Equal(t, case_.readaheadPieces, pieces, "%v", case_)
        }
 }
 
@@ -317,3 +323,84 @@ func (suite) TestMergingTrackersByAddingSpecs(c *check.C) {
        c.Assert(T.Trackers[0][0].URL(), check.Equals, "http://a")
        c.Assert(T.Trackers[1][0].URL(), check.Equals, "udp://b")
 }
+
+type badData struct {
+}
+
+func (me badData) WriteAt(b []byte, off int64) (int, error) {
+       return 0, nil
+}
+
+func (me badData) WriteSectionTo(w io.Writer, off, n int64) (int64, error) {
+       return 0, nil
+}
+
+func (me badData) PieceComplete(piece int) bool {
+       return true
+}
+
+func (me badData) PieceCompleted(piece int) error {
+       return nil
+}
+
+func (me badData) ReadAt(b []byte, off int64) (n int, err error) {
+       if off >= 5 {
+               err = io.EOF
+               return
+       }
+       n = copy(b, []byte("hello")[off:])
+       return
+}
+
+var _ StatefulData = badData{}
+
+// We read from a piece which is marked completed, but is missing data.
+func TestCompletedPieceWrongSize(t *testing.T) {
+       cfg := TestingConfig
+       cfg.TorrentDataOpener = func(*metainfo.Info) data.Data {
+               return badData{}
+       }
+       cl, _ := NewClient(&cfg)
+       defer cl.Close()
+       tt, new, err := cl.AddTorrentSpec(&TorrentSpec{
+               Info: &metainfo.InfoEx{
+                       Info: metainfo.Info{
+                               PieceLength: 15,
+                               Pieces:      make([]byte, 20),
+                               Files: []metainfo.FileInfo{
+                                       metainfo.FileInfo{Path: []string{"greeting"}, Length: 13},
+                               },
+                       },
+               },
+       })
+       if err != nil {
+               t.Fatal(err)
+       }
+       if !new {
+               t.Fatal("expected new")
+       }
+       r := tt.NewReader()
+       defer r.Close()
+       b := make([]byte, 20)
+       n, err := io.ReadFull(r, b)
+       if n != 5 || err != io.ErrUnexpectedEOF {
+               t.Fatal(n, err)
+       }
+       defer tt.Drop()
+}
+
+func BenchmarkAddLargeTorrent(b *testing.B) {
+       cfg := TestingConfig
+       cfg.DisableTCP = true
+       cfg.DisableUTP = true
+       cfg.ListenAddr = "redonk"
+       cl, _ := NewClient(&cfg)
+       defer cl.Close()
+       for range iter.N(b.N) {
+               t, err := cl.AddTorrentFromFile("testdata/bootstrap.dat.torrent")
+               if err != nil {
+                       b.Fatal(err)
+               }
+               t.Drop()
+       }
+}
index edea0f46ed73655d8d0cd4320b929868cbc9e3ad..d1f3a116790617026d30a82236879f6b38a5d919 100644 (file)
@@ -16,11 +16,6 @@ import (
        _ "github.com/anacrolix/torrent/util/profile"
 )
 
-type pingResponse struct {
-       addr string
-       krpc dht.Msg
-}
-
 var (
        tableFileName = flag.String("tableFile", "", "name of file for storing node info")
        serveAddr     = flag.String("serveAddr", ":0", "local UDP address")
@@ -89,7 +84,7 @@ func init() {
        if err != nil {
                log.Fatalf("error loading table: %s", err)
        }
-       log.Printf("dht server on %s, ID is %x", s.LocalAddr(), s.IDString())
+       log.Printf("dht server on %s, ID is %x", s.Addr(), s.ID())
        setupSignals()
 }
 
@@ -141,7 +136,7 @@ getPeers:
        values:
                for {
                        select {
-                       case v, ok := <-ps.Values:
+                       case v, ok := <-ps.Peers:
                                if !ok {
                                        break values
                                }
index bdb3fa65eb78fdd8acd0751801bccc51c216867e..0588a84819ad91b18beee03b9b010b1ac702c66a 100644 (file)
@@ -1,3 +1,4 @@
+// Pings DHT nodes with the given network addresses.
 package main
 
 import (
@@ -30,7 +31,7 @@ func main() {
        if err != nil {
                log.Fatal(err)
        }
-       log.Printf("dht server on %s", s.LocalAddr())
+       log.Printf("dht server on %s", s.Addr())
        pingResponses := make(chan pingResponse)
        timeoutChan := make(chan struct{})
        go func() {
index 3bcf5b03ca05f63b1dec702923c38ebb73baf872..19799ce8006b57c394cda338db4d779bafd6db17 100644 (file)
@@ -11,11 +11,6 @@ import (
        "github.com/anacrolix/torrent/dht"
 )
 
-type pingResponse struct {
-       addr string
-       krpc dht.Msg
-}
-
 var (
        tableFileName = flag.String("tableFile", "", "name of file for storing node info")
        serveAddr     = flag.String("serveAddr", ":0", "local UDP address")
@@ -71,7 +66,7 @@ func init() {
        if err != nil {
                log.Fatalf("error loading table: %s", err)
        }
-       log.Printf("dht server on %s, ID is %q", s.LocalAddr(), s.IDString())
+       log.Printf("dht server on %s, ID is %q", s.Addr(), s.ID())
        setupSignals()
 }
 
index e7f643fe34c5ab682254a51e11ac0a967af9e8d8..878fc7db9404c81445c57325b9a2ce16bde88982 100644 (file)
@@ -7,9 +7,8 @@ import (
        "os"
        "sync"
 
-       "github.com/anacrolix/libtorgo/bencode"
-
        "github.com/anacrolix/torrent"
+       "github.com/anacrolix/torrent/bencode"
 )
 
 func main() {
@@ -27,7 +26,7 @@ func main() {
                wg.Add(1)
                go func() {
                        defer wg.Done()
-                       <-t.GotMetainfo
+                       <-t.GotInfo()
                        mi := t.MetaInfo()
                        t.Drop()
                        f, err := os.Create(mi.Info.Name + ".torrent")
index b5d3828f085b8981408f0626d3d29e55ceb5ca95..0a877c25bd03c76d56e115bfe07b2097e5324a27 100644 (file)
@@ -7,7 +7,7 @@ import (
        "path/filepath"
        "runtime"
 
-       torrent "github.com/anacrolix/libtorgo/metainfo"
+       torrent "github.com/anacrolix/torrent/metainfo"
 )
 
 var (
@@ -27,6 +27,9 @@ func main() {
        b := torrent.Builder{}
        for _, filename := range flag.Args() {
                if err := filepath.Walk(filename, func(path string, info os.FileInfo, err error) error {
+                       if err != nil {
+                               return err
+                       }
                        log.Print(path)
                        if info.IsDir() {
                                return nil
index d79662034677bab62c528cee50af3fc9977a6b7d..bcbf08ec9efde655efb16065dcbc50a70b3f8045 100644 (file)
@@ -5,7 +5,7 @@ import (
        "fmt"
        "log"
 
-       "github.com/anacrolix/libtorgo/metainfo"
+       "github.com/anacrolix/torrent/metainfo"
 )
 
 func main() {
index 23a037579a5e275140c04fdbdde27af24ebb0be4..68bae968700ba093adac01ba2aebfb8d2fd89700 100644 (file)
@@ -1,15 +1,20 @@
 package main
 
 import (
+       "flag"
        "fmt"
        "os"
 
-       "github.com/anacrolix/libtorgo/metainfo"
-
        "github.com/anacrolix/torrent"
+       "github.com/anacrolix/torrent/metainfo"
 )
 
 func main() {
+       flag.Parse()
+       if flag.NArg() != 0 {
+               fmt.Fprintf(os.Stderr, "%s\n", "torrent-magnet: unexpected positional arguments")
+               os.Exit(2)
+       }
        mi, err := metainfo.Load(os.Stdin)
        if err != nil {
                fmt.Fprintf(os.Stderr, "error reading metainfo from stdin: %s", err)
index 342bba201b0dd55d11f69fb85bed262fd1d94c18..bfcb7d3f24c6057f2ac0f2b2ddbf86cce2e9f748 100644 (file)
@@ -1,11 +1,13 @@
 package main
 
 import (
+       "encoding/json"
        "flag"
        "fmt"
        "log"
+       "os"
 
-       "github.com/anacrolix/libtorgo/metainfo"
+       "github.com/anacrolix/torrent/metainfo"
 )
 
 func main() {
@@ -19,8 +21,14 @@ func main() {
                }
                if *name {
                        fmt.Printf("%s\n", metainfo.Info.Name)
-               } else {
-                       fmt.Printf("%+#v\n", metainfo)
+                       continue
+               }
+               d := map[string]interface{}{
+                       "Name":      metainfo.Info.Name,
+                       "NumPieces": metainfo.Info.NumPieces(),
                }
+               b, _ := json.MarshalIndent(d, "", "  ")
+               os.Stdout.Write(b)
        }
+       os.Stdout.WriteString("\n")
 }
diff --git a/cmd/torrent-pick/main.go b/cmd/torrent-pick/main.go
new file mode 100644 (file)
index 0000000..3bea027
--- /dev/null
@@ -0,0 +1,191 @@
+// Downloads torrents from the command-line.
+package main
+
+import (
+       "bufio"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "log"
+       "net"
+       "net/http"
+       _ "net/http/pprof"
+       "os"
+       "strings"
+       "time"
+
+       _ "github.com/anacrolix/envpprof"
+       "github.com/dustin/go-humanize"
+       "github.com/jessevdk/go-flags"
+
+       "github.com/anacrolix/torrent"
+       "github.com/anacrolix/torrent/metainfo"
+)
+
+// fmt.Fprintf(os.Stderr, "Usage: %s \n", os.Args[0])
+
+func resolvedPeerAddrs(ss []string) (ret []torrent.Peer, err error) {
+       for _, s := range ss {
+               var addr *net.TCPAddr
+               addr, err = net.ResolveTCPAddr("tcp", s)
+               if err != nil {
+                       return
+               }
+               ret = append(ret, torrent.Peer{
+                       IP:   addr.IP,
+                       Port: addr.Port,
+               })
+       }
+       return
+}
+
+func bytesCompleted(tc *torrent.Client) (ret int64) {
+       for _, t := range tc.Torrents() {
+               if t.Info() != nil {
+                       ret += t.BytesCompleted()
+               }
+       }
+       return
+}
+
+// Returns an estimate of the total bytes for all torrents.
+func totalBytesEstimate(tc *torrent.Client) (ret int64) {
+       var noInfo, hadInfo int64
+       for _, t := range tc.Torrents() {
+               info := t.Info()
+               if info == nil {
+                       noInfo++
+                       continue
+               }
+               ret += info.TotalLength()
+               hadInfo++
+       }
+       if hadInfo != 0 {
+               // Treat each torrent without info as the average of those with,
+               // rounded up.
+               ret += (noInfo*ret + hadInfo - 1) / hadInfo
+       }
+       return
+}
+
+func progressLine(tc *torrent.Client) string {
+       return fmt.Sprintf("\033[K%s / %s\r", humanize.Bytes(uint64(bytesCompleted(tc))), humanize.Bytes(uint64(totalBytesEstimate(tc))))
+}
+
+func dstFileName(picked string) string {
+       parts := strings.Split(picked, "/")
+       return parts[len(parts)-1]
+}
+
+func main() {
+       log.SetFlags(log.LstdFlags | log.Lshortfile)
+       var rootGroup struct {
+               Client    torrent.Config `group:"Client Options"`
+               TestPeers []string       `long:"test-peer" description:"address of peer to inject to every torrent"`
+               Pick      string         `long:"pick" description:"filename to pick"`
+       }
+       // Don't pass flags.PrintError because it's inconsistent with printing.
+       // https://github.com/jessevdk/go-flags/issues/132
+       parser := flags.NewParser(&rootGroup, flags.HelpFlag|flags.PassDoubleDash)
+       parser.Usage = "[OPTIONS] (magnet URI or .torrent file path)..."
+       posArgs, err := parser.Parse()
+       if err != nil {
+               fmt.Fprintln(os.Stderr, "Download from the BitTorrent network.")
+               fmt.Println(err)
+               os.Exit(2)
+       }
+       log.Printf("File to pick: %s", rootGroup.Pick)
+
+       testPeers, err := resolvedPeerAddrs(rootGroup.TestPeers)
+       if err != nil {
+               log.Fatal(err)
+       }
+
+       if len(posArgs) == 0 {
+               fmt.Fprintln(os.Stderr, "no torrents specified")
+               return
+       }
+
+       tmpdir, err := ioutil.TempDir("", "torrent-pick-")
+       if err != nil {
+               log.Fatal(err)
+       }
+
+       defer os.RemoveAll(tmpdir)
+
+       rootGroup.Client.DataDir = tmpdir
+
+       client, err := torrent.NewClient(&rootGroup.Client)
+       if err != nil {
+               log.Fatalf("error creating client: %s", err)
+       }
+       http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
+               client.WriteStatus(w)
+       })
+       defer client.Close()
+
+       dstName := dstFileName(rootGroup.Pick)
+
+       f, err := os.Create(dstName)
+       if err != nil {
+               log.Fatal(err)
+       }
+       dstWriter := bufio.NewWriter(f)
+
+       done := make(chan struct{})
+       for _, arg := range posArgs {
+               t := func() torrent.Torrent {
+                       if strings.HasPrefix(arg, "magnet:") {
+                               t, err := client.AddMagnet(arg)
+                               if err != nil {
+                                       log.Fatalf("error adding magnet: %s", err)
+                               }
+                               return t
+                       } else {
+                               metaInfo, err := metainfo.LoadFromFile(arg)
+                               if err != nil {
+                                       log.Fatal(err)
+                               }
+                               t, err := client.AddTorrent(metaInfo)
+                               if err != nil {
+                                       log.Fatal(err)
+                               }
+                               return t
+                       }
+               }()
+               err := t.AddPeers(testPeers)
+               if err != nil {
+                       log.Fatal(err)
+               }
+
+               go func() {
+                       <-t.GotInfo()
+                       files := t.Files()
+                       for _, file := range files {
+                               if file.Path() == rootGroup.Pick {
+
+                                       log.Printf("Downloading file: %s", file.Path())
+
+                                       srcReader := io.NewSectionReader(t.NewReader(), file.Offset(), file.Length())
+                                       io.Copy(dstWriter, srcReader)
+                                       close(done)
+                                       break
+                               }
+                       }
+               }()
+       }
+
+       ticker := time.NewTicker(time.Second)
+waitDone:
+       for {
+               select {
+               case <-done:
+                       break waitDone
+               case <-ticker.C:
+                       os.Stdout.WriteString(progressLine(client))
+               }
+       }
+       if rootGroup.Client.Seed {
+               select {}
+       }
+}
index a3d97b247d3d9eafb20e53a6a62aaba358659f85..48fa8b6a83acb2e8ef3b107b679ae78a118ec468 100644 (file)
@@ -9,23 +9,38 @@ import (
        "os"
        "path/filepath"
 
-       "github.com/anacrolix/libtorgo/metainfo"
        "launchpad.net/gommap"
 
+       "github.com/anacrolix/torrent/metainfo"
        "github.com/anacrolix/torrent/mmap_span"
 )
 
 var (
-       filePath = flag.String("torrent", "/path/to/the.torrent", "path of the torrent file")
-       dirPath  = flag.String("path", "/torrent/data", "path of the torrent data")
+       torrentPath = flag.String("torrent", "/path/to/the.torrent", "path of the torrent file")
+       dataPath    = flag.String("path", "/torrent/data", "path of the torrent data")
 )
 
-func init() {
-       flag.Parse()
+func fileToMmap(filename string, length int64, devZero *os.File) gommap.MMap {
+       osFile, err := os.Open(filename)
+       if err != nil {
+               log.Fatal(err)
+       }
+       mmapFd := osFile.Fd()
+       goMMap, err := gommap.MapRegion(mmapFd, 0, length, gommap.PROT_READ, gommap.MAP_PRIVATE)
+       if err != nil {
+               log.Fatal(err)
+       }
+       if int64(len(goMMap)) != length {
+               log.Printf("file mmap has wrong size: %#v", filename)
+       }
+       osFile.Close()
+
+       return goMMap
 }
 
 func main() {
-       metaInfo, err := metainfo.LoadFromFile(*filePath)
+       flag.Parse()
+       metaInfo, err := metainfo.LoadFromFile(*torrentPath)
        if err != nil {
                log.Fatal(err)
        }
@@ -34,29 +49,18 @@ func main() {
                log.Print(err)
        }
        defer devZero.Close()
-       var mMapSpan *mmap_span.MMapSpan
-       for _, file := range metaInfo.Info.Files {
-               filename := filepath.Join(append([]string{*dirPath, metaInfo.Info.Name}, file.Path...)...)
-               osFile, err := os.Open(filename)
-               mmapFd := osFile.Fd()
-               if err != nil {
-                       if pe, ok := err.(*os.PathError); ok && pe.Err.Error() == "no such file or directory" {
-                               mmapFd = devZero.Fd()
-                       } else {
-                               log.Fatal(err)
-                       }
-               }
-               goMMap, err := gommap.MapRegion(mmapFd, 0, file.Length, gommap.PROT_READ, gommap.MAP_PRIVATE)
-               if err != nil {
-                       log.Fatal(err)
-               }
-               if int64(len(goMMap)) != file.Length {
-                       log.Printf("file mmap has wrong size: %#v", filename)
+       mMapSpan := &mmap_span.MMapSpan{}
+       if len(metaInfo.Info.Files) > 0 {
+               for _, file := range metaInfo.Info.Files {
+                       filename := filepath.Join(append([]string{*dataPath, metaInfo.Info.Name}, file.Path...)...)
+                       goMMap := fileToMmap(filename, file.Length, devZero)
+                       mMapSpan.Append(goMMap)
                }
-               osFile.Close()
+               log.Println(len(metaInfo.Info.Files))
+       } else {
+               goMMap := fileToMmap(*dataPath, metaInfo.Info.Length, devZero)
                mMapSpan.Append(goMMap)
        }
-       log.Println(len(metaInfo.Info.Files))
        log.Println(mMapSpan.Size())
        log.Println(len(metaInfo.Info.Pieces))
        for piece := 0; piece < (len(metaInfo.Info.Pieces)+sha1.Size-1)/sha1.Size; piece++ {
index 6062d16904c4c5ad9a0c39cfbf34a4b6983ddcd3..88b186c763388931a1f20a0e858e74706515266f 100644 (file)
@@ -1,3 +1,4 @@
+// Downloads torrents from the command-line.
 package main
 
 import (
@@ -11,11 +12,11 @@ import (
        "time"
 
        _ "github.com/anacrolix/envpprof"
-       "github.com/anacrolix/libtorgo/metainfo"
        "github.com/dustin/go-humanize"
        "github.com/jessevdk/go-flags"
 
        "github.com/anacrolix/torrent"
+       "github.com/anacrolix/torrent/metainfo"
 )
 
 // fmt.Fprintf(os.Stderr, "Usage: %s \n", os.Args[0])
@@ -37,7 +38,7 @@ func resolvedPeerAddrs(ss []string) (ret []torrent.Peer, err error) {
 
 func bytesCompleted(tc *torrent.Client) (ret int64) {
        for _, t := range tc.Torrents() {
-               if t.Info != nil {
+               if t.Info() != nil {
                        ret += t.BytesCompleted()
                }
        }
@@ -48,11 +49,12 @@ func bytesCompleted(tc *torrent.Client) (ret int64) {
 func totalBytesEstimate(tc *torrent.Client) (ret int64) {
        var noInfo, hadInfo int64
        for _, t := range tc.Torrents() {
-               if t.Info == nil {
+               info := t.Info()
+               if info == nil {
                        noInfo++
                        continue
                }
-               ret += t.Info.TotalLength()
+               ret += info.TotalLength()
                hadInfo++
        }
        if hadInfo != 0 {
@@ -71,7 +73,6 @@ func main() {
        log.SetFlags(log.LstdFlags | log.Lshortfile)
        var rootGroup struct {
                Client    torrent.Config `group:"Client Options"`
-               Seed      bool           `long:"seed" description:"continue seeding torrents after completed"`
                TestPeers []string       `long:"test-peer" description:"address of peer to inject to every torrent"`
        }
        // Don't pass flags.PrintError because it's inconsistent with printing.
@@ -80,7 +81,7 @@ func main() {
        parser.Usage = "[OPTIONS] (magnet URI or .torrent file path)..."
        posArgs, err := parser.Parse()
        if err != nil {
-               fmt.Fprintln(os.Stderr, "Download from the BitTorrent network.\n")
+               fmt.Fprintln(os.Stderr, "Download from the BitTorrent network.")
                fmt.Println(err)
                os.Exit(2)
        }
@@ -126,7 +127,7 @@ func main() {
                        log.Fatal(err)
                }
                go func() {
-                       <-t.GotMetainfo
+                       <-t.GotInfo()
                        t.DownloadAll()
                }()
        }
@@ -149,7 +150,7 @@ waitDone:
                        os.Stdout.WriteString(progressLine(client))
                }
        }
-       if rootGroup.Seed {
+       if rootGroup.Client.Seed {
                select {}
        }
 }
index a7e786ae3fa2a7327f5ec55dba66466381cb8afd..1e1c61985821ff260262f80d6d40b83d25fc98eb 100644 (file)
@@ -1,3 +1,4 @@
+// Mounts a FUSE filesystem backed by torrents and magnet links.
 package main
 
 import (
@@ -99,10 +100,13 @@ func main() {
        client, err := torrent.NewClient(&torrent.Config{
                DataDir:         *downloadDir,
                DisableTrackers: *disableTrackers,
-               // DownloadStrategy: torrent.NewResponsiveDownloadStrategy(*readaheadBytes),
-               ListenAddr: *listenAddr,
-               NoUpload:   true, // Ensure that uploads are responsive.
+               ListenAddr:      *listenAddr,
+               NoUpload:        true, // Ensure that downloads are responsive.
        })
+       if err != nil {
+               log.Fatal(err)
+       }
+       // This is naturally exported via GOPPROF=http.
        http.DefaultServeMux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
                client.WriteStatus(w)
        })
@@ -126,13 +130,11 @@ func main() {
                                        }
                                }
                        case dirwatch.Removed:
-                               for _, t := range client.Torrents() {
-                                       if t.InfoHash != ev.InfoHash {
-                                               continue
-                                       }
-                                       t.Drop()
+                               T, ok := client.Torrent(ev.InfoHash)
+                               if !ok {
                                        break
                                }
+                               T.Drop()
                        }
                }
        }()
index 461e0f6dbd17afd7a86572931403213bf3a8c5eb..776fae380a6ff8da2b91c58bcf07bb0f63e818e5 100644 (file)
@@ -3,11 +3,11 @@ package main
 import (
        "flag"
        "log"
+       "math"
        "strings"
 
-       "github.com/anacrolix/libtorgo/metainfo"
-
        "github.com/anacrolix/torrent"
+       "github.com/anacrolix/torrent/metainfo"
        "github.com/anacrolix/torrent/tracker"
 )
 
@@ -27,8 +27,7 @@ func main() {
        flag.Parse()
        ar := tracker.AnnounceRequest{
                NumWant: -1,
-               // Go won't let me do uint64(-1), I'm sorry.
-               Left: 0xffffffffffffffff,
+               Left:    math.MaxUint64,
        }
        for _, arg := range flag.Args() {
                ts, err := argSpec(arg)
index f052cb56e544f1961ad40e38057b1828688c403f..b6921182a2fd6416e05cbc44baa68646fe7aa446 100644 (file)
--- a/config.go
+++ b/config.go
@@ -20,14 +20,17 @@ type Config struct {
        NoDHT bool `long:"disable-dht"`
        // Overrides the default DHT configuration.
        DHTConfig *dht.ServerConfig
-       // Don't send chunks to peers.
+       // Don't ever send chunks to peers.
        NoUpload bool `long:"no-upload"`
+       // Upload even after there's nothing in it for us. By default uploading is
+       // not altruistic.
+       Seed bool `long:"seed"`
        // User-provided Client peer ID. If not present, one is generated automatically.
        PeerID string
        // For the bittorrent protocol.
        DisableUTP bool
        // For the bittorrent protocol.
-       DisableTCP bool
+       DisableTCP bool `long:"disable-tcp"`
        // Don't automatically load "$ConfigDir/blocklist".
        NoDefaultBlocklist bool
        // Defaults to "$HOME/.config/torrent". This is where "blocklist",
@@ -39,4 +42,5 @@ type Config struct {
        // Called to instantiate storage for each added torrent. Provided backends
        // are in $REPO/data. If not set, the "file" implementation is used.
        TorrentDataOpener
+       DisableEncryption bool `long:"disable-encryption"`
 }
index b365f743d0e544f3834fc8dc6f727e37c6e7ec9f..471d3f593ce1aa9e5519face50db62cb5682892d 100644 (file)
@@ -2,6 +2,7 @@ package torrent
 
 import (
        "bufio"
+       "bytes"
        "container/list"
        "encoding"
        "errors"
@@ -12,8 +13,7 @@ import (
        "sync"
        "time"
 
-       "github.com/anacrolix/libtorgo/bencode"
-
+       "github.com/anacrolix/torrent/bencode"
        "github.com/anacrolix/torrent/internal/pieceordering"
        pp "github.com/anacrolix/torrent/peer_protocol"
 )
@@ -40,17 +40,20 @@ type connection struct {
        post      chan pp.Message
        writeCh   chan []byte
 
-       // The connections preferred order to download pieces.
+       // The connection's preferred order to download pieces. The index is the
+       // piece, the value is its priority.
        piecePriorities []int
        // The piece request order based on piece priorities.
        pieceRequestOrder *pieceordering.Instance
 
        UnwantedChunksReceived int
        UsefulChunksReceived   int
+       chunksSent             int
 
        lastMessageReceived     time.Time
        completedHandshake      time.Time
        lastUsefulChunkReceived time.Time
+       lastChunkSent           time.Time
 
        // Stuff controlled by the local peer.
        Interested       bool
@@ -101,22 +104,28 @@ func (cn *connection) localAddr() net.Addr {
 // Adjust piece position in the request order for this connection based on the
 // given piece priority.
 func (cn *connection) pendPiece(piece int, priority piecePriority) {
-       if priority == piecePriorityNone {
+       if priority == PiecePriorityNone {
                cn.pieceRequestOrder.DeletePiece(piece)
                return
        }
        pp := cn.piecePriorities[piece]
-       // Priority goes to Now, then Next in connection order. Then Readahead in
-       // by piece index. Then normal again by connection order.
+       // Priority regions not to scale. Within each region, piece is randomized
+       // according to connection.
+
+       // <-request first -- last->
+       // [ Now         ]
+       //  [ Next       ]
+       //   [ Readahead ]
+       //                [ Normal ]
        key := func() int {
                switch priority {
-               case piecePriorityNow:
+               case PiecePriorityNow:
                        return -3*len(cn.piecePriorities) + 3*pp
-               case piecePriorityNext:
+               case PiecePriorityNext:
                        return -2*len(cn.piecePriorities) + 2*pp
-               case piecePriorityReadahead:
+               case PiecePriorityReadahead:
                        return -len(cn.piecePriorities) + pp
-               case piecePriorityNormal:
+               case PiecePriorityNormal:
                        return pp
                default:
                        panic(priority)
@@ -226,20 +235,29 @@ func (cn *connection) statusFlags() (ret string) {
        return
 }
 
+func (cn *connection) String() string {
+       var buf bytes.Buffer
+       cn.WriteStatus(&buf, nil)
+       return buf.String()
+}
+
 func (cn *connection) WriteStatus(w io.Writer, t *torrent) {
        // \t isn't preserved in <pre> blocks?
-       fmt.Fprintf(w, "%q: %s-%s\n", cn.PeerID, cn.localAddr(), cn.remoteAddr())
+       fmt.Fprintf(w, "%+q: %s-%s\n", cn.PeerID, cn.localAddr(), cn.remoteAddr())
        fmt.Fprintf(w, "    last msg: %s, connected: %s, last useful chunk: %s\n",
                eventAgeString(cn.lastMessageReceived),
                eventAgeString(cn.completedHandshake),
                eventAgeString(cn.lastUsefulChunkReceived))
-       fmt.Fprintf(w, "    %s completed, good chunks: %d/%d reqs: %d-%d, flags: %s\n",
+       fmt.Fprintf(w,
+               "    %s completed, good chunks: %d/%d-%d reqq: %d-%d, flags: %s\n",
                cn.completedString(t),
                cn.UsefulChunksReceived,
                cn.UnwantedChunksReceived+cn.UsefulChunksReceived,
+               cn.chunksSent,
                len(cn.Requests),
                len(cn.PeerRequests),
-               cn.statusFlags())
+               cn.statusFlags(),
+       )
 }
 
 func (c *connection) Close() {
@@ -375,6 +393,7 @@ func (c *connection) Choke() {
        c.Post(pp.Message{
                Type: pp.Choke,
        })
+       c.PeerRequests = nil
        c.Choked = true
 }
 
index d93b7d05e2a627ed188ebdf18efdaf357e2df062..1ffb9aef3fb65005d1482f136e40f43867a161e4 100644 (file)
@@ -77,22 +77,22 @@ func (suite) TestPieceRequestOrder(t *C) {
                piecePriorities:   []int{1, 4, 0, 3, 2},
        }
        testRequestOrder(nil, c.pieceRequestOrder, t)
-       c.pendPiece(2, piecePriorityNone)
+       c.pendPiece(2, PiecePriorityNone)
        testRequestOrder(nil, c.pieceRequestOrder, t)
-       c.pendPiece(1, piecePriorityNormal)
-       c.pendPiece(2, piecePriorityNormal)
+       c.pendPiece(1, PiecePriorityNormal)
+       c.pendPiece(2, PiecePriorityNormal)
        testRequestOrder([]int{2, 1}, c.pieceRequestOrder, t)
-       c.pendPiece(0, piecePriorityNormal)
+       c.pendPiece(0, PiecePriorityNormal)
        testRequestOrder([]int{2, 0, 1}, c.pieceRequestOrder, t)
-       c.pendPiece(1, piecePriorityReadahead)
+       c.pendPiece(1, PiecePriorityReadahead)
        testRequestOrder([]int{1, 2, 0}, c.pieceRequestOrder, t)
-       c.pendPiece(4, piecePriorityNow)
+       c.pendPiece(4, PiecePriorityNow)
        // now(4), r(1), normal(0, 2)
        testRequestOrder([]int{4, 1, 2, 0}, c.pieceRequestOrder, t)
-       c.pendPiece(2, piecePriorityReadahead)
+       c.pendPiece(2, PiecePriorityReadahead)
        // N(4), R(1, 2), N(0)
        testRequestOrder([]int{4, 2, 1, 0}, c.pieceRequestOrder, t)
-       c.pendPiece(1, piecePriorityNow)
+       c.pendPiece(1, PiecePriorityNow)
        // now(4, 1), readahead(2), normal(0)
        // in the same order, the keys will be: -15+6, -15+12, -5, 1
        // so we test that a very low priority (for this connection), "now"
@@ -100,7 +100,7 @@ func (suite) TestPieceRequestOrder(t *C) {
        testRequestOrder([]int{4, 2, 1, 0}, c.pieceRequestOrder, t)
        // Note this intentionally sets to None a piece that's not in the order.
        for i := range iter.N(5) {
-               c.pendPiece(i, piecePriorityNone)
+               c.pendPiece(i, PiecePriorityNone)
        }
        testRequestOrder(nil, c.pieceRequestOrder, t)
 }
diff --git a/data/blob/atime_darwin.go b/data/blob/atime_darwin.go
deleted file mode 100644 (file)
index 33091d5..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-package blob
-
-import (
-       "os"
-       "syscall"
-       "time"
-)
-
-func accessTime(fi os.FileInfo) time.Time {
-       ts := fi.Sys().(*syscall.Stat_t).Atimespec
-       return time.Unix(ts.Sec, ts.Nano())
-}
diff --git a/data/blob/atime_linux.go b/data/blob/atime_linux.go
deleted file mode 100644 (file)
index 3bcc054..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-package blob
-
-import (
-       "os"
-       "syscall"
-       "time"
-)
-
-func accessTime(fi os.FileInfo) time.Time {
-       ts := fi.Sys().(*syscall.Stat_t).Atim
-       return time.Unix(ts.Sec, ts.Nano())
-}
index a3ade8e687a6dd08547159b200af65d6eca6f84f..0afc047bc652343aeb3eed91af5401711e940479 100644 (file)
@@ -3,8 +3,9 @@ package blob
 import (
        "encoding/hex"
        "io"
+       "log"
 
-       "github.com/anacrolix/libtorgo/metainfo"
+       "github.com/anacrolix/torrent/metainfo"
 )
 
 type data struct {
@@ -19,16 +20,36 @@ func (me *data) pieceHashHex(i int) string {
 func (me *data) Close() {}
 
 func (me *data) ReadAt(b []byte, off int64) (n int, err error) {
-       p := me.info.Piece(int(off / me.info.PieceLength))
-       f := me.store.pieceRead(p)
-       if f == nil {
-               err = io.ErrUnexpectedEOF
-               return
-       }
-       defer f.Close()
-       n, err = f.ReadAt(b, off%me.info.PieceLength)
-       if err == io.EOF {
-               err = io.ErrUnexpectedEOF
+       for len(b) != 0 {
+               if off >= me.info.TotalLength() {
+                       err = io.EOF
+                       break
+               }
+               p := me.info.Piece(int(off / me.info.PieceLength))
+               f := me.store.pieceRead(p)
+               if f == nil {
+                       log.Println("piece not found", p)
+                       err = io.ErrUnexpectedEOF
+                       break
+               }
+               b1 := b
+               maxN1 := int(p.Length() - off%me.info.PieceLength)
+               if len(b1) > maxN1 {
+                       b1 = b1[:maxN1]
+               }
+               var n1 int
+               n1, err = f.ReadAt(b1, off%me.info.PieceLength)
+               f.Close()
+               n += n1
+               off += int64(n1)
+               b = b[n1:]
+               if err == io.EOF {
+                       err = nil
+                       break
+               }
+               if err != nil {
+                       break
+               }
        }
        return
 }
@@ -79,11 +100,7 @@ func (me *data) WriteSectionTo(w io.Writer, off, n int64) (written int64, err er
                var pr io.ReadCloser
                pr, err = me.pieceReader(i, off)
                if err != nil {
-                       if err == io.EOF {
-                               err = nil
-                       }
                        return
-                       pr.Close()
                }
                var n1 int64
                n1, err = io.CopyN(w, pr, n)
index ddfcd599230206f857b64102eda6945553ef8b50..7331ce6e9d9ebf8c0f09a11261227ba7faf6ab2e 100644 (file)
@@ -10,11 +10,13 @@ import (
        "os"
        "path/filepath"
        "sort"
+       "sync"
        "time"
 
-       "github.com/anacrolix/libtorgo/metainfo"
+       "github.com/anacrolix/missinggo"
 
        dataPkg "github.com/anacrolix/torrent/data"
+       "github.com/anacrolix/torrent/metainfo"
 )
 
 const (
@@ -23,8 +25,10 @@ const (
 )
 
 type store struct {
-       baseDir   string
-       capacity  int64
+       baseDir  string
+       capacity int64
+
+       mu        sync.Mutex
        completed map[[20]byte]struct{}
 }
 
@@ -41,7 +45,7 @@ func Capacity(bytes int64) StoreOption {
 }
 
 func NewStore(baseDir string, opt ...StoreOption) dataPkg.Store {
-       s := &store{baseDir, -1, nil}
+       s := &store{baseDir, -1, sync.Mutex{}, nil}
        for _, o := range opt {
                o(s)
        }
@@ -49,17 +53,19 @@ func NewStore(baseDir string, opt ...StoreOption) dataPkg.Store {
        return s
 }
 
-func hexStringPieceHashArray(s string) (ret [20]byte) {
+// Turns a 40-byte hex string into its equivalent binary byte array.
+func hexStringPieceHashArray(s string) (ret [20]byte, ok bool) {
        if len(s) != 40 {
-               panic(s)
+               return
        }
        n, err := hex.Decode(ret[:], []byte(s))
        if err != nil {
-               panic(err)
+               return
        }
        if n != 20 {
                panic(n)
        }
+       ok = true
        return
 }
 
@@ -70,10 +76,11 @@ func (me *store) initCompleted() {
        }
        me.completed = make(map[[20]byte]struct{}, len(fis))
        for _, fi := range fis {
-               if len(fi.Name()) != 40 {
+               binHash, ok := hexStringPieceHashArray(fi.Name())
+               if !ok {
                        continue
                }
-               me.completed[hexStringPieceHashArray(fi.Name())] = struct{}{}
+               me.completed[binHash] = struct{}{}
        }
 }
 
@@ -117,7 +124,11 @@ func (me *store) pieceWrite(p metainfo.Piece) (f *os.File) {
        return
 }
 
+// Returns the file for the given piece, if it exists. It could be completed,
+// or incomplete.
 func (me *store) pieceRead(p metainfo.Piece) (f *os.File) {
+       me.mu.Lock()
+       defer me.mu.Unlock()
        f, err := os.Open(me.path(p, true))
        if err == nil {
                return
@@ -125,9 +136,9 @@ func (me *store) pieceRead(p metainfo.Piece) (f *os.File) {
        if !os.IsNotExist(err) {
                panic(err)
        }
-       // Ermahgerd, self heal. This occurs when the underlying data goes
-       // missing, likely due to a "cache flush", also known as deleting the
-       // files. TODO: Trigger an asynchronous initCompleted.
+       // Mark the file not completed, in case we thought it was. TODO: Trigger
+       // an asynchronous initCompleted to reinitialize the entire completed map
+       // as there are likely other files missing.
        delete(me.completed, sliceToPieceHashArray(p.Hash()))
        f, err = os.Open(me.path(p, false))
        if err == nil {
@@ -160,7 +171,10 @@ func (me *store) removeCompleted(name string) (err error) {
        if err != nil {
                return err
        }
-       delete(me.completed, hexStringPieceHashArray(name))
+       binHash, ok := hexStringPieceHashArray(name)
+       if ok {
+               delete(me.completed, binHash)
+       }
        return
 }
 
@@ -174,7 +188,7 @@ func (me fileInfoSorter) Len() int {
 
 func lastTime(fi os.FileInfo) (ret time.Time) {
        ret = fi.ModTime()
-       atime := accessTime(fi)
+       atime := missinggo.FileInfoAccessTime(fi)
        if atime.After(ret) {
                ret = atime
        }
index bc61d25b7931f4a41b3529b93dd19638f0d273f2..f783279ddf0a654b92f521ff54dafc7b42b8b732 100644 (file)
@@ -3,7 +3,7 @@ package data
 import (
        "io"
 
-       "github.com/anacrolix/libtorgo/metainfo"
+       "github.com/anacrolix/torrent/metainfo"
 )
 
 type Store interface {
index e9741b06f754168b3f1fefc5e627dd2468888e9e..0c30bc50c14aa7c289eb871a6c9aa10df4c6faf6 100644 (file)
@@ -5,7 +5,7 @@ import (
        "os"
        "path/filepath"
 
-       "github.com/anacrolix/libtorgo/metainfo"
+       "github.com/anacrolix/torrent/metainfo"
 )
 
 type data struct {
index d222366289c9ae3fa4b7141db033d919c976880c..7a0324b3655d4fbbe03752c094215af9696e6d8b 100644 (file)
@@ -5,9 +5,9 @@ import (
        "os"
        "path/filepath"
 
-       "github.com/anacrolix/libtorgo/metainfo"
        "launchpad.net/gommap"
 
+       "github.com/anacrolix/torrent/metainfo"
        "github.com/anacrolix/torrent/mmap_span"
 )
 
diff --git a/dht/addr.go b/dht/addr.go
new file mode 100644 (file)
index 0000000..ea6589d
--- /dev/null
@@ -0,0 +1,41 @@
+package dht
+
+import (
+       "net"
+
+       "github.com/anacrolix/torrent/util"
+)
+
+// Used internally to refer to node network addresses.
+type dHTAddr interface {
+       net.Addr
+       UDPAddr() *net.UDPAddr
+       IP() net.IP
+}
+
+// Speeds up some of the commonly called Addr methods.
+type cachedAddr struct {
+       a  net.Addr
+       s  string
+       ip net.IP
+}
+
+func (ca cachedAddr) Network() string {
+       return ca.a.Network()
+}
+
+func (ca cachedAddr) String() string {
+       return ca.s
+}
+
+func (ca cachedAddr) UDPAddr() *net.UDPAddr {
+       return ca.a.(*net.UDPAddr)
+}
+
+func (ca cachedAddr) IP() net.IP {
+       return ca.ip
+}
+
+func newDHTAddr(addr net.Addr) dHTAddr {
+       return cachedAddr{addr, addr.String(), util.AddrIP(addr)}
+}
similarity index 63%
rename from dht/getpeers.go
rename to dht/announce.go
index 46ccd082691eb3f7dfb0836cb1101778d0a447e8..a179263b11e54d4f8271d815c22295e0d172df9e 100644 (file)
@@ -13,8 +13,14 @@ import (
        "github.com/anacrolix/torrent/util"
 )
 
-type peerDiscovery struct {
-       *peerStream
+// Maintains state for an ongoing Announce operation. An Announce is started
+// by calling Server.Announce.
+type Announce struct {
+       mu    sync.Mutex
+       Peers chan PeersValues
+       // Inner chan is set to nil on close.
+       values              chan PeersValues
+       stop                chan struct{}
        triedAddrs          *bloom.BloomFilter
        pending             int
        server              *Server
@@ -24,13 +30,18 @@ type peerDiscovery struct {
        announcePortImplied bool
 }
 
-func (pd *peerDiscovery) NumContacted() int {
-       pd.mu.Lock()
-       defer pd.mu.Unlock()
-       return pd.numContacted
+// Returns the number of distinct remote addresses the announce has queried.
+func (me *Announce) NumContacted() int {
+       me.mu.Lock()
+       defer me.mu.Unlock()
+       return me.numContacted
 }
 
-func (s *Server) Announce(infoHash string, port int, impliedPort bool) (*peerDiscovery, error) {
+// This is kind of the main thing you want to do with DHT. It traverses the
+// graph toward nodes that store peers for the infohash, streaming them to the
+// caller, and announcing the local node to each node if allowed and
+// specified.
+func (s *Server) Announce(infoHash string, port int, impliedPort bool) (*Announce, error) {
        s.mu.Lock()
        startAddrs := func() (ret []dHTAddr) {
                for _, n := range s.closestGoodNodes(160, infoHash) {
@@ -40,7 +51,7 @@ func (s *Server) Announce(infoHash string, port int, impliedPort bool) (*peerDis
        }()
        s.mu.Unlock()
        if len(startAddrs) == 0 {
-               addrs, err := bootstrapAddrs()
+               addrs, err := bootstrapAddrs(s.bootstrapNodes)
                if err != nil {
                        return nil, err
                }
@@ -48,12 +59,10 @@ func (s *Server) Announce(infoHash string, port int, impliedPort bool) (*peerDis
                        startAddrs = append(startAddrs, newDHTAddr(addr))
                }
        }
-       disc := &peerDiscovery{
-               peerStream: &peerStream{
-                       Values: make(chan peerStreamValue, 100),
-                       stop:   make(chan struct{}),
-                       values: make(chan peerStreamValue),
-               },
+       disc := &Announce{
+               Peers:               make(chan PeersValues, 100),
+               stop:                make(chan struct{}),
+               values:              make(chan PeersValues),
                triedAddrs:          bloom.NewWithEstimates(1000, 0.5),
                server:              s,
                infoHash:            infoHash,
@@ -62,12 +71,12 @@ func (s *Server) Announce(infoHash string, port int, impliedPort bool) (*peerDis
        }
        // Function ferries from values to Values until discovery is halted.
        go func() {
-               defer close(disc.Values)
+               defer close(disc.Peers)
                for {
                        select {
                        case psv := <-disc.values:
                                select {
-                               case disc.Values <- psv:
+                               case disc.Peers <- psv:
                                case <-disc.stop:
                                        return
                                }
@@ -87,7 +96,7 @@ func (s *Server) Announce(infoHash string, port int, impliedPort bool) (*peerDis
        return disc, nil
 }
 
-func (me *peerDiscovery) gotNodeAddr(addr dHTAddr) {
+func (me *Announce) gotNodeAddr(addr dHTAddr) {
        if util.AddrPort(addr) == 0 {
                // Not a contactable address.
                return
@@ -95,13 +104,13 @@ func (me *peerDiscovery) gotNodeAddr(addr dHTAddr) {
        if me.triedAddrs.Test([]byte(addr.String())) {
                return
        }
-       if me.server.ipBlocked(util.AddrIP(addr)) {
+       if me.server.ipBlocked(addr.UDPAddr().IP) {
                return
        }
        me.contact(addr)
 }
 
-func (me *peerDiscovery) contact(addr dHTAddr) {
+func (me *Announce) contact(addr dHTAddr) {
        me.numContacted++
        me.triedAddrs.Add([]byte(addr.String()))
        if err := me.getPeers(addr); err != nil {
@@ -111,7 +120,7 @@ func (me *peerDiscovery) contact(addr dHTAddr) {
        me.pending++
 }
 
-func (me *peerDiscovery) transactionClosed() {
+func (me *Announce) transactionClosed() {
        me.pending--
        if me.pending == 0 {
                me.close()
@@ -119,15 +128,15 @@ func (me *peerDiscovery) transactionClosed() {
        }
 }
 
-func (me *peerDiscovery) responseNode(node NodeInfo) {
+func (me *Announce) responseNode(node NodeInfo) {
        me.gotNodeAddr(node.Addr)
 }
 
-func (me *peerDiscovery) closingCh() chan struct{} {
-       return me.peerStream.stop
+func (me *Announce) closingCh() chan struct{} {
+       return me.stop
 }
 
-func (me *peerDiscovery) announcePeer(to dHTAddr, token string) {
+func (me *Announce) announcePeer(to dHTAddr, token string) {
        me.server.mu.Lock()
        err := me.server.announcePeer(to, me.infoHash, me.announcePort, token, me.announcePortImplied)
        me.server.mu.Unlock()
@@ -136,7 +145,7 @@ func (me *peerDiscovery) announcePeer(to dHTAddr, token string) {
        }
 }
 
-func (me *peerDiscovery) getPeers(addr dHTAddr) error {
+func (me *Announce) getPeers(addr dHTAddr) error {
        me.server.mu.Lock()
        defer me.server.mu.Unlock()
        t, err := me.server.getPeers(addr, me.infoHash)
@@ -157,11 +166,11 @@ func (me *peerDiscovery) getPeers(addr dHTAddr) error {
                        }
                        copy(nodeInfo.ID[:], m.ID())
                        select {
-                       case me.peerStream.values <- peerStreamValue{
+                       case me.values <- PeersValues{
                                Peers:    vs,
                                NodeInfo: nodeInfo,
                        }:
-                       case <-me.peerStream.stop:
+                       case <-me.stop:
                        }
                }
 
@@ -176,28 +185,22 @@ func (me *peerDiscovery) getPeers(addr dHTAddr) error {
        return nil
 }
 
-type peerStreamValue struct {
+// Corresponds to the "values" key in a get_peers KRPC response. A list of
+// peers that a node has reported as being in the swarm for a queried info
+// hash.
+type PeersValues struct {
        Peers    []util.CompactPeer // Peers given in get_peers response.
        NodeInfo                    // The node that gave the response.
 }
 
-// TODO: This was to be the shared publicly accessible part returned by DHT
-// functions that stream peers. Possibly not necessary anymore.
-type peerStream struct {
-       mu     sync.Mutex
-       Values chan peerStreamValue
-       // Inner chan is set to nil when on close.
-       values chan peerStreamValue
-       stop   chan struct{}
-}
-
-func (ps *peerStream) Close() {
-       ps.mu.Lock()
-       defer ps.mu.Unlock()
-       ps.close()
+// Stop the announce.
+func (me *Announce) Close() {
+       me.mu.Lock()
+       defer me.mu.Unlock()
+       me.close()
 }
 
-func (ps *peerStream) close() {
+func (ps *Announce) close() {
        select {
        case <-ps.stop:
        default:
index 5f70993438ca782b2e0cf7f19e5bca3d66cb7343..ff27fcff04afeaa9163bfff471bd425cfa5c31fa 100644 (file)
@@ -1,3 +1,8 @@
+// Package dht implements a DHT for use with the BitTorrent protocol,
+// described in BEP 5: http://www.bittorrent.org/beps/bep_0005.html.
+//
+// Standard use involves creating a NewServer, and calling Announce on it with
+// the details of your local torrent client and infohash of interest.
 package dht
 
 import (
@@ -6,6 +11,7 @@ import (
        "encoding/binary"
        "errors"
        "fmt"
+       "hash/crc32"
        "io"
        "log"
        "math/big"
@@ -14,9 +20,9 @@ import (
        "os"
        "time"
 
-       "github.com/anacrolix/libtorgo/bencode"
        "github.com/anacrolix/sync"
 
+       "github.com/anacrolix/torrent/bencode"
        "github.com/anacrolix/torrent/iplist"
        "github.com/anacrolix/torrent/logonce"
        "github.com/anacrolix/torrent/util"
@@ -36,69 +42,64 @@ type transactionKey struct {
 type Server struct {
        id               string
        socket           net.PacketConn
-       transactions     map[transactionKey]*transaction
+       transactions     map[transactionKey]*Transaction
        transactionIDInt uint64
-       nodes            map[string]*Node // Keyed by dHTAddr.String().
+       nodes            map[string]*node // Keyed by dHTAddr.String().
        mu               sync.Mutex
        closed           chan struct{}
        passive          bool // Don't respond to queries.
        ipBlockList      *iplist.IPList
 
-       NumConfirmedAnnounces int
-}
-
-type dHTAddr interface {
-       net.Addr
-       UDPAddr() *net.UDPAddr
-}
-
-type cachedAddr struct {
-       a net.Addr
-       s string
-}
-
-func (ca cachedAddr) Network() string {
-       return ca.a.Network()
-}
-
-func (ca cachedAddr) String() string {
-       return ca.s
-}
-
-func (ca cachedAddr) UDPAddr() *net.UDPAddr {
-       return ca.a.(*net.UDPAddr)
-}
-
-func newDHTAddr(addr net.Addr) dHTAddr {
-       return cachedAddr{addr, addr.String()}
+       numConfirmedAnnounces int
+       bootstrapNodes        []string
+       config                ServerConfig
 }
 
 type ServerConfig struct {
-       Addr    string
-       Conn    net.PacketConn
-       Passive bool // Don't respond to queries.
-}
-
-type serverStats struct {
-       NumGoodNodes               int
-       NumNodes                   int
-       NumOutstandingTransactions int
-}
-
-func (s *Server) Stats() (ss serverStats) {
+       Addr string // Listen address. Used if Conn is nil.
+       Conn net.PacketConn
+       // Don't respond to queries from other nodes.
+       Passive bool
+       // DHT bootstrap nodes.
+       BootstrapNodes []string
+       // Disable the DHT security extension:
+       // http://www.libtorrent.org/dht_sec.html.
+       NoSecurity bool
+       // Initial IP blocklist to use. Applied before serving and bootstrapping
+       // begins.
+       IPBlocklist *iplist.IPList
+}
+
+type ServerStats struct {
+       // Count of nodes in the node table that responded to our last query or
+       // haven't yet been queried.
+       GoodNodes int
+       // Count of nodes in the node table.
+       Nodes int
+       // Transactions awaiting a response.
+       OutstandingTransactions int
+       // Individual announce_peer requests that got a success response.
+       ConfirmedAnnounces int
+}
+
+// Returns statistics for the server.
+func (s *Server) Stats() (ss ServerStats) {
        s.mu.Lock()
        defer s.mu.Unlock()
        for _, n := range s.nodes {
                if n.DefinitelyGood() {
-                       ss.NumGoodNodes++
+                       ss.GoodNodes++
                }
        }
-       ss.NumNodes = len(s.nodes)
-       ss.NumOutstandingTransactions = len(s.transactions)
+       ss.Nodes = len(s.nodes)
+       ss.OutstandingTransactions = len(s.transactions)
+       ss.ConfirmedAnnounces = s.numConfirmedAnnounces
        return
 }
 
-func (s *Server) LocalAddr() net.Addr {
+// Returns the listen address for the server. Packets arriving at this address
+// are processed by the server (unless aliens are involved).
+func (s *Server) Addr() net.Addr {
        return s.socket.LocalAddr()
 }
 
@@ -111,11 +112,15 @@ func makeSocket(addr string) (socket *net.UDPConn, err error) {
        return
 }
 
+// Create a new DHT server.
 func NewServer(c *ServerConfig) (s *Server, err error) {
        if c == nil {
                c = &ServerConfig{}
        }
-       s = &Server{}
+       s = &Server{
+               config:      *c,
+               ipBlockList: c.IPBlocklist,
+       }
        if c.Conn != nil {
                s.socket = c.Conn
        } else {
@@ -125,6 +130,7 @@ func NewServer(c *ServerConfig) (s *Server, err error) {
                }
        }
        s.passive = c.Passive
+       s.bootstrapNodes = c.BootstrapNodes
        err = s.init()
        if err != nil {
                return
@@ -143,12 +149,17 @@ func NewServer(c *ServerConfig) (s *Server, err error) {
        go func() {
                err := s.bootstrap()
                if err != nil {
-                       log.Printf("error bootstrapping DHT: %s", err)
+                       select {
+                       case <-s.closed:
+                       default:
+                               log.Printf("error bootstrapping DHT: %s", err)
+                       }
                }
        }()
        return
 }
 
+// Returns a description of the Server. Python repr-style.
 func (s *Server) String() string {
        return fmt.Sprintf("dht server on %s", s.socket.LocalAddr())
 }
@@ -180,11 +191,14 @@ func (nid0 *nodeID) Distance(nid1 *nodeID) (ret big.Int) {
        return
 }
 
-func (nid *nodeID) String() string {
-       return string(nid.i.Bytes())
+func (nid *nodeID) ByteString() string {
+       var buf [20]byte
+       b := nid.i.Bytes()
+       copy(buf[20-len(b):], b)
+       return string(buf[:])
 }
 
-type Node struct {
+type node struct {
        addr          dHTAddr
        id            nodeID
        announceToken string
@@ -194,24 +208,34 @@ type Node struct {
        lastSentQuery   time.Time
 }
 
-func (n *Node) idString() string {
-       return n.id.String()
+func (n *node) IsSecure() bool {
+       if n.id.IsUnset() {
+               return false
+       }
+       return nodeIdSecure(n.id.ByteString(), n.addr.IP())
+}
+
+func (n *node) idString() string {
+       return n.id.ByteString()
 }
 
-func (n *Node) SetIDFromBytes(b []byte) {
+func (n *node) SetIDFromBytes(b []byte) {
+       if len(b) != 20 {
+               panic(b)
+       }
        n.id.i.SetBytes(b)
        n.id.set = true
 }
 
-func (n *Node) SetIDFromString(s string) {
-       n.id.i.SetBytes([]byte(s))
+func (n *node) SetIDFromString(s string) {
+       n.SetIDFromBytes([]byte(s))
 }
 
-func (n *Node) IDNotSet() bool {
+func (n *node) IDNotSet() bool {
        return n.id.i.Int64() == 0
 }
 
-func (n *Node) NodeInfo() (ret NodeInfo) {
+func (n *node) NodeInfo() (ret NodeInfo) {
        ret.Addr = n.addr
        if n := copy(ret.ID[:], n.idString()); n != 20 {
                panic(n)
@@ -219,7 +243,7 @@ func (n *Node) NodeInfo() (ret NodeInfo) {
        return
 }
 
-func (n *Node) DefinitelyGood() bool {
+func (n *node) DefinitelyGood() bool {
        if len(n.idString()) != 20 {
                return false
        }
@@ -234,6 +258,10 @@ func (n *Node) DefinitelyGood() bool {
        return true
 }
 
+// A wrapper around the unmarshalled KRPC dict that constitutes messages in
+// the DHT. There are various helpers for extracting common data from the
+// message. In normal use, Msg is abstracted away for you, but it can be of
+// interest.
 type Msg map[string]interface{}
 
 var _ fmt.Stringer = Msg{}
@@ -316,7 +344,7 @@ func (m Msg) AnnounceToken() (token string, ok bool) {
        return
 }
 
-type transaction struct {
+type Transaction struct {
        mu             sync.Mutex
        remoteAddr     dHTAddr
        t              string
@@ -331,14 +359,15 @@ type transaction struct {
        userOnResponse func(Msg)
 }
 
-func (t *transaction) SetResponseHandler(f func(Msg)) {
+// Set a function to be called with the response.
+func (t *Transaction) SetResponseHandler(f func(Msg)) {
        t.mu.Lock()
        defer t.mu.Unlock()
        t.userOnResponse = f
        t.tryHandleResponse()
 }
 
-func (t *transaction) tryHandleResponse() {
+func (t *Transaction) tryHandleResponse() {
        if t.userOnResponse == nil {
                return
        }
@@ -351,7 +380,7 @@ func (t *transaction) tryHandleResponse() {
        }
 }
 
-func (t *transaction) Key() transactionKey {
+func (t *Transaction) key() transactionKey {
        return transactionKey{
                t.remoteAddr.String(),
                t.t,
@@ -362,11 +391,11 @@ func jitterDuration(average time.Duration, plusMinus time.Duration) time.Duratio
        return average - plusMinus/2 + time.Duration(rand.Int63n(int64(plusMinus)))
 }
 
-func (t *transaction) startTimer() {
+func (t *Transaction) startTimer() {
        t.timer = time.AfterFunc(jitterDuration(queryResendEvery, time.Second), t.timerCallback)
 }
 
-func (t *transaction) timerCallback() {
+func (t *Transaction) timerCallback() {
        t.mu.Lock()
        defer t.mu.Unlock()
        select {
@@ -385,7 +414,7 @@ func (t *transaction) timerCallback() {
        }
 }
 
-func (t *transaction) sendQuery() error {
+func (t *Transaction) sendQuery() error {
        err := t.s.writeToNode(t.queryPacket, t.remoteAddr)
        if err != nil {
                return err
@@ -394,7 +423,7 @@ func (t *transaction) sendQuery() error {
        return nil
 }
 
-func (t *transaction) timeout() {
+func (t *Transaction) timeout() {
        go func() {
                t.s.mu.Lock()
                defer t.s.mu.Unlock()
@@ -403,7 +432,7 @@ func (t *transaction) timeout() {
        t.close()
 }
 
-func (t *transaction) close() {
+func (t *Transaction) close() {
        if t.closing() {
                return
        }
@@ -419,7 +448,7 @@ func (t *transaction) close() {
        }()
 }
 
-func (t *transaction) closing() bool {
+func (t *Transaction) closing() bool {
        select {
        case <-t.done:
                return true
@@ -428,13 +457,14 @@ func (t *transaction) closing() bool {
        }
 }
 
-func (t *transaction) Close() {
+// Abandon the transaction.
+func (t *Transaction) Close() {
        t.mu.Lock()
        defer t.mu.Unlock()
        t.close()
 }
 
-func (t *transaction) handleResponse(m Msg) {
+func (t *Transaction) handleResponse(m Msg) {
        t.mu.Lock()
        if t.closing() {
                t.mu.Unlock()
@@ -457,6 +487,60 @@ func (t *transaction) handleResponse(m Msg) {
        t.tryHandleResponse()
 }
 
+func maskForIP(ip net.IP) []byte {
+       switch {
+       case ip.To4() != nil:
+               return []byte{0x03, 0x0f, 0x3f, 0xff}
+       default:
+               return []byte{0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7f, 0xff}
+       }
+}
+
+// Generate the CRC used to make or validate secure node ID.
+func crcIP(ip net.IP, rand uint8) uint32 {
+       if ip4 := ip.To4(); ip4 != nil {
+               ip = ip4
+       }
+       // Copy IP so we can make changes. Go sux at this.
+       ip = append(make(net.IP, 0, len(ip)), ip...)
+       mask := maskForIP(ip)
+       for i := range mask {
+               ip[i] &= mask[i]
+       }
+       r := rand & 7
+       ip[0] |= r << 5
+       return crc32.Checksum(ip[:len(mask)], crc32.MakeTable(crc32.Castagnoli))
+}
+
+// Makes a node ID valid, in-place.
+func secureNodeId(id []byte, ip net.IP) {
+       crc := crcIP(ip, id[19])
+       id[0] = byte(crc >> 24 & 0xff)
+       id[1] = byte(crc >> 16 & 0xff)
+       id[2] = byte(crc>>8&0xf8) | id[2]&7
+}
+
+// http://www.libtorrent.org/dht_sec.html
+func nodeIdSecure(id string, ip net.IP) bool {
+       if len(id) != 20 {
+               panic(fmt.Sprintf("%q", id))
+       }
+       if ip4 := ip.To4(); ip4 != nil {
+               ip = ip4
+       }
+       crc := crcIP(ip, id[19])
+       if id[0] != byte(crc>>24&0xff) {
+               return false
+       }
+       if id[1] != byte(crc>>16&0xff) {
+               return false
+       }
+       if id[2]&0xf8 != byte(crc>>8&0xf8) {
+               return false
+       }
+       return true
+}
+
 func (s *Server) setDefaults() (err error) {
        if s.id == "" {
                var id [20]byte
@@ -473,12 +557,14 @@ func (s *Server) setDefaults() (err error) {
                if len(id) != 20 {
                        panic(len(id))
                }
+               secureNodeId(id[:], util.AddrIP(s.socket.LocalAddr()))
                s.id = string(id[:])
        }
-       s.nodes = make(map[string]*Node, 10000)
+       s.nodes = make(map[string]*node, maxNodes)
        return
 }
 
+// Packets to and from any address matching a range in the list are dropped.
 func (s *Server) SetIPBlockList(list *iplist.IPList) {
        s.mu.Lock()
        defer s.mu.Unlock()
@@ -491,7 +577,7 @@ func (s *Server) init() (err error) {
                return
        }
        s.closed = make(chan struct{})
-       s.transactions = make(map[transactionKey]*transaction)
+       s.transactions = make(map[transactionKey]*Transaction)
        return
 }
 
@@ -499,6 +585,7 @@ func (s *Server) processPacket(b []byte, addr dHTAddr) {
        var d Msg
        err := bencode.Unmarshal(b, &d)
        if err != nil {
+               readUnmarshalError.Add(1)
                func() {
                        if se, ok := err.(*bencode.SyntaxError); ok {
                                // The message was truncated.
@@ -514,13 +601,14 @@ func (s *Server) processPacket(b []byte, addr dHTAddr) {
                                        return
                                }
                        }
-                       log.Printf("%s: received bad krpc message from %s: %s: %q", s, addr, err, b)
+                       log.Printf("%s: received bad krpc message from %s: %s: %+q", s, addr, err, b)
                }()
                return
        }
        s.mu.Lock()
        defer s.mu.Unlock()
        if d["y"] == "q" {
+               readQuery.Add(1)
                s.handleQuery(addr, d)
                return
        }
@@ -529,7 +617,7 @@ func (s *Server) processPacket(b []byte, addr dHTAddr) {
                //log.Printf("unexpected message: %#v", d)
                return
        }
-       node := s.getNode(addr)
+       node := s.getNode(addr, d.ID())
        node.lastGotResponse = time.Now()
        // TODO: Update node ID as this is an authoritative packet.
        go t.handleResponse(d)
@@ -543,10 +631,18 @@ func (s *Server) serve() error {
                if err != nil {
                        return err
                }
+               read.Add(1)
                if n == len(b) {
                        logonce.Stderr.Printf("received dht packet exceeds buffer size")
                        continue
                }
+               s.mu.Lock()
+               blocked := s.ipBlocked(util.AddrIP(addr))
+               s.mu.Unlock()
+               if blocked {
+                       readBlocked.Add(1)
+                       continue
+               }
                s.processPacket(b[:n], newDHTAddr(addr))
        }
 }
@@ -558,19 +654,17 @@ func (s *Server) ipBlocked(ip net.IP) bool {
        return s.ipBlockList.Lookup(ip) != nil
 }
 
+// Adds directly to the node table.
 func (s *Server) AddNode(ni NodeInfo) {
        s.mu.Lock()
        defer s.mu.Unlock()
        if s.nodes == nil {
-               s.nodes = make(map[string]*Node)
-       }
-       n := s.getNode(ni.Addr)
-       if n.IDNotSet() {
-               n.SetIDFromBytes(ni.ID[:])
+               s.nodes = make(map[string]*node)
        }
+       s.getNode(ni.Addr, string(ni.ID[:]))
 }
 
-func (s *Server) nodeByID(id string) *Node {
+func (s *Server) nodeByID(id string) *node {
        for _, node := range s.nodes {
                if node.idString() == id {
                        return node
@@ -581,7 +675,7 @@ func (s *Server) nodeByID(id string) *Node {
 
 func (s *Server) handleQuery(source dHTAddr, m Msg) {
        args := m["a"].(map[string]interface{})
-       node := s.getNode(source)
+       node := s.getNode(source, m.ID())
        node.SetIDFromString(args["id"].(string))
        node.lastGotQuery = time.Now()
        // Don't respond.
@@ -656,7 +750,7 @@ func (s *Server) reply(addr dHTAddr, t string, r map[string]interface{}) {
        if r == nil {
                r = make(map[string]interface{}, 1)
        }
-       r["id"] = s.IDString()
+       r["id"] = s.ID()
        m := map[string]interface{}{
                "t": t,
                "y": "r",
@@ -672,19 +766,33 @@ func (s *Server) reply(addr dHTAddr, t string, r map[string]interface{}) {
        }
 }
 
-func (s *Server) getNode(addr dHTAddr) (n *Node) {
+// Returns a node struct for the addr. It is taken from the table or created
+// and possibly added if required and meets validity constraints.
+func (s *Server) getNode(addr dHTAddr, id string) (n *node) {
        addrStr := addr.String()
        n = s.nodes[addrStr]
-       if n == nil {
-               n = &Node{
-                       addr: addr,
-               }
-               if len(s.nodes) < maxNodes {
-                       s.nodes[addrStr] = n
+       if n != nil {
+               if id != "" {
+                       n.SetIDFromString(id)
                }
+               return
+       }
+       n = &node{
+               addr: addr,
+       }
+       if len(id) == 20 {
+               n.SetIDFromString(id)
+       }
+       if len(s.nodes) >= maxNodes {
+               return
+       }
+       if !s.config.NoSecurity && !n.IsSecure() {
+               return
        }
+       s.nodes[addrStr] = n
        return
 }
+
 func (s *Server) nodeTimedOut(addr dHTAddr) {
        node, ok := s.nodes[addr.String()]
        if !ok {
@@ -718,7 +826,7 @@ func (s *Server) writeToNode(b []byte, node dHTAddr) (err error) {
        return
 }
 
-func (s *Server) findResponseTransaction(transactionID string, sourceNode dHTAddr) *transaction {
+func (s *Server) findResponseTransaction(transactionID string, sourceNode dHTAddr) *Transaction {
        return s.transactions[transactionKey{
                sourceNode.String(),
                transactionID}]
@@ -731,30 +839,32 @@ func (s *Server) nextTransactionID() string {
        return string(b[:n])
 }
 
-func (s *Server) deleteTransaction(t *transaction) {
-       delete(s.transactions, t.Key())
+func (s *Server) deleteTransaction(t *Transaction) {
+       delete(s.transactions, t.key())
 }
 
-func (s *Server) addTransaction(t *transaction) {
-       if _, ok := s.transactions[t.Key()]; ok {
+func (s *Server) addTransaction(t *Transaction) {
+       if _, ok := s.transactions[t.key()]; ok {
                panic("transaction not unique")
        }
-       s.transactions[t.Key()] = t
+       s.transactions[t.key()] = t
 }
 
-func (s *Server) IDString() string {
+// Returns the 20-byte server ID. This is the ID used to communicate with the
+// DHT network.
+func (s *Server) ID() string {
        if len(s.id) != 20 {
                panic("bad node id")
        }
        return s.id
 }
 
-func (s *Server) query(node dHTAddr, q string, a map[string]interface{}, onResponse func(Msg)) (t *transaction, err error) {
+func (s *Server) query(node dHTAddr, q string, a map[string]interface{}, onResponse func(Msg)) (t *Transaction, err error) {
        tid := s.nextTransactionID()
        if a == nil {
                a = make(map[string]interface{}, 1)
        }
-       a["id"] = s.IDString()
+       a["id"] = s.ID()
        d := map[string]interface{}{
                "t": tid,
                "y": "q",
@@ -765,7 +875,7 @@ func (s *Server) query(node dHTAddr, q string, a map[string]interface{}, onRespo
        if err != nil {
                return
        }
-       t = &transaction{
+       t = &Transaction{
                remoteAddr:  node,
                t:           tid,
                response:    make(chan Msg, 1),
@@ -778,12 +888,13 @@ func (s *Server) query(node dHTAddr, q string, a map[string]interface{}, onRespo
        if err != nil {
                return
        }
-       s.getNode(node).lastSentQuery = time.Now()
+       s.getNode(node, "").lastSentQuery = time.Now()
        t.startTimer()
        s.addTransaction(t)
        return
 }
 
+// The size in bytes of a NodeInfo in its compact binary representation.
 const CompactNodeInfoLen = 26
 
 type NodeInfo struct {
@@ -791,6 +902,8 @@ type NodeInfo struct {
        Addr dHTAddr
 }
 
+// Writes the node info to its compact binary representation in b. See
+// CompactNodeInfoLen.
 func (ni *NodeInfo) PutCompact(b []byte) error {
        if n := copy(b[:], ni.ID[:]); n != 20 {
                panic(n)
@@ -818,29 +931,13 @@ func (cni *NodeInfo) UnmarshalCompact(b []byte) error {
        return nil
 }
 
-func (s *Server) Ping(node *net.UDPAddr) (*transaction, error) {
+// Sends a ping query to the address given.
+func (s *Server) Ping(node *net.UDPAddr) (*Transaction, error) {
        s.mu.Lock()
        defer s.mu.Unlock()
        return s.query(newDHTAddr(node), "ping", nil, nil)
 }
 
-// Announce a local peer. This can only be done to nodes that gave us an
-// announce token, which is received in responses during GetPeers. It's
-// recommended then that GetPeers is called before this method.
-func (s *Server) AnnouncePeer(port int, impliedPort bool, infoHash string) (err error) {
-       s.mu.Lock()
-       defer s.mu.Unlock()
-       for _, node := range s.closestNodes(160, nodeIDFromString(infoHash), func(n *Node) bool {
-               return n.announceToken != ""
-       }) {
-               err = s.announcePeer(node.addr, infoHash, port, node.announceToken, impliedPort)
-               if err != nil {
-                       break
-               }
-       }
-       return
-}
-
 func (s *Server) announcePeer(node dHTAddr, infoHash string, port int, token string, impliedPort bool) (err error) {
        if port == 0 && !impliedPort {
                return errors.New("nothing to announce")
@@ -861,7 +958,7 @@ func (s *Server) announcePeer(node dHTAddr, infoHash string, port int, token str
                        logonce.Stderr.Printf("announce_peer response: %s", err)
                        return
                }
-               s.NumConfirmedAnnounces++
+               s.numConfirmedAnnounces++
        })
        return
 }
@@ -879,13 +976,13 @@ func (s *Server) liftNodes(d Msg) {
                if s.ipBlocked(util.AddrIP(cni.Addr)) {
                        continue
                }
-               n := s.getNode(cni.Addr)
+               n := s.getNode(cni.Addr, string(cni.ID[:]))
                n.SetIDFromBytes(cni.ID[:])
        }
 }
 
 // Sends a find_node query to addr. targetID is the node we're looking for.
-func (s *Server) findNode(addr dHTAddr, targetID string) (t *transaction, err error) {
+func (s *Server) findNode(addr dHTAddr, targetID string) (t *Transaction, err error) {
        t, err = s.query(addr, "find_node", map[string]interface{}{"target": targetID}, func(d Msg) {
                // Scrape peers from the response to put in the server's table before
                // handing the response back to the caller.
@@ -934,7 +1031,7 @@ func (m Msg) Values() (vs []util.CompactPeer) {
        return
 }
 
-func (s *Server) getPeers(addr dHTAddr, infoHash string) (t *transaction, err error) {
+func (s *Server) getPeers(addr dHTAddr, infoHash string) (t *Transaction, err error) {
        if len(infoHash) != 20 {
                err = fmt.Errorf("infohash has bad length")
                return
@@ -943,17 +1040,21 @@ func (s *Server) getPeers(addr dHTAddr, infoHash string) (t *transaction, err er
                s.liftNodes(m)
                at, ok := m.AnnounceToken()
                if ok {
-                       s.getNode(addr).announceToken = at
+                       s.getNode(addr, m.ID()).announceToken = at
                }
        })
        return
 }
 
-func bootstrapAddrs() (addrs []*net.UDPAddr, err error) {
-       for _, addrStr := range []string{
-               "router.utorrent.com:6881",
-               "router.bittorrent.com:6881",
-       } {
+func bootstrapAddrs(nodeAddrs []string) (addrs []*net.UDPAddr, err error) {
+       bootstrapNodes := nodeAddrs
+       if len(bootstrapNodes) == 0 {
+               bootstrapNodes = []string{
+                       "router.utorrent.com:6881",
+                       "router.bittorrent.com:6881",
+               }
+       }
+       for _, addrStr := range bootstrapNodes {
                udpAddr, err := net.ResolveUDPAddr("udp4", addrStr)
                if err != nil {
                        continue
@@ -966,13 +1067,25 @@ func bootstrapAddrs() (addrs []*net.UDPAddr, err error) {
        return
 }
 
+// Adds bootstrap nodes directly to table, if there's room. Node ID security
+// is bypassed, but the IP blocklist is not.
 func (s *Server) addRootNodes() error {
-       addrs, err := bootstrapAddrs()
+       addrs, err := bootstrapAddrs(s.bootstrapNodes)
        if err != nil {
                return err
        }
        for _, addr := range addrs {
-               s.nodes[addr.String()] = &Node{
+               if len(s.nodes) >= maxNodes {
+                       break
+               }
+               if s.nodes[addr.String()] != nil {
+                       continue
+               }
+               if s.ipBlocked(addr.IP) {
+                       log.Printf("dht root node is in the blocklist: %s", addr.IP)
+                       continue
+               }
+               s.nodes[addr.String()] = &node{
                        addr: newDHTAddr(addr),
                }
        }
@@ -992,7 +1105,7 @@ func (s *Server) bootstrap() (err error) {
        for {
                var outstanding sync.WaitGroup
                for _, node := range s.nodes {
-                       var t *transaction
+                       var t *Transaction
                        t, err = s.findNode(node.addr, s.id)
                        if err != nil {
                                err = fmt.Errorf("error sending find_node: %s", err)
@@ -1034,12 +1147,14 @@ func (s *Server) numGoodNodes() (num int) {
        return
 }
 
+// Returns how many nodes are in the node table.
 func (s *Server) NumNodes() int {
        s.mu.Lock()
        defer s.mu.Unlock()
        return len(s.nodes)
 }
 
+// Exports the current node table.
 func (s *Server) Nodes() (nis []NodeInfo) {
        s.mu.Lock()
        defer s.mu.Unlock()
@@ -1058,6 +1173,7 @@ func (s *Server) Nodes() (nis []NodeInfo) {
        return
 }
 
+// Stops the server network activity. This is all that's required to clean-up a Server.
 func (s *Server) Close() {
        s.mu.Lock()
        select {
@@ -1076,13 +1192,13 @@ func init() {
        maxDistance.SetBit(&zero, 160, 1)
 }
 
-func (s *Server) closestGoodNodes(k int, targetID string) []*Node {
-       return s.closestNodes(k, nodeIDFromString(targetID), func(n *Node) bool { return n.DefinitelyGood() })
+func (s *Server) closestGoodNodes(k int, targetID string) []*node {
+       return s.closestNodes(k, nodeIDFromString(targetID), func(n *node) bool { return n.DefinitelyGood() })
 }
 
-func (s *Server) closestNodes(k int, target nodeID, filter func(*Node) bool) []*Node {
+func (s *Server) closestNodes(k int, target nodeID, filter func(*node) bool) []*node {
        sel := newKClosestNodesSelector(k, target)
-       idNodes := make(map[string]*Node, len(s.nodes))
+       idNodes := make(map[string]*node, len(s.nodes))
        for _, node := range s.nodes {
                if !filter(node) {
                        continue
@@ -1091,9 +1207,9 @@ func (s *Server) closestNodes(k int, target nodeID, filter func(*Node) bool) []*
                idNodes[node.idString()] = node
        }
        ids := sel.IDs()
-       ret := make([]*Node, 0, len(ids))
+       ret := make([]*node, 0, len(ids))
        for _, id := range ids {
-               ret = append(ret, idNodes[id.String()])
+               ret = append(ret, idNodes[id.ByteString()])
        }
        return ret
 }
index a2e5d11ec597cfaafff61cca19ebd3d8ce17c6b2..8091242a446ceb0f90fcfb67089113a4220362e1 100644 (file)
@@ -1,10 +1,13 @@
 package dht
 
 import (
+       "encoding/hex"
        "math/big"
        "math/rand"
        "net"
        "testing"
+
+       "github.com/anacrolix/torrent/util"
 )
 
 func TestSetNilBigInt(t *testing.T) {
@@ -94,9 +97,9 @@ func TestClosestNodes(t *testing.T) {
        }
        m := map[string]bool{}
        for _, id := range cn.IDs() {
-               m[id.String()] = true
+               m[id.ByteString()] = true
        }
-       if !m[testIDs[3].String()] || !m[testIDs[4].String()] {
+       if !m[testIDs[3].ByteString()] || !m[testIDs[4].ByteString()] {
                t.FailNow()
        }
 }
@@ -142,7 +145,7 @@ func TestPing(t *testing.T) {
        defer srv0.Close()
        tn, err := srv.Ping(&net.UDPAddr{
                IP:   []byte{127, 0, 0, 1},
-               Port: srv0.LocalAddr().(*net.UDPAddr).Port,
+               Port: srv0.Addr().(*net.UDPAddr).Port,
        })
        if err != nil {
                t.Fatal(err)
@@ -150,9 +153,61 @@ func TestPing(t *testing.T) {
        defer tn.Close()
        ok := make(chan bool)
        tn.SetResponseHandler(func(msg Msg) {
-               ok <- msg.ID() == srv0.IDString()
+               ok <- msg.ID() == srv0.ID()
        })
        if !<-ok {
                t.FailNow()
        }
 }
+
+func TestDHTSec(t *testing.T) {
+       for _, case_ := range []struct {
+               ipStr     string
+               nodeIDHex string
+               valid     bool
+       }{
+               // These 5 are from the spec example. They are all valid.
+               {"124.31.75.21", "5fbfbff10c5d6a4ec8a88e4c6ab4c28b95eee401", true},
+               {"21.75.31.124", "5a3ce9c14e7a08645677bbd1cfe7d8f956d53256", true},
+               {"65.23.51.170", "a5d43220bc8f112a3d426c84764f8c2a1150e616", true},
+               {"84.124.73.14", "1b0321dd1bb1fe518101ceef99462b947a01ff41", true},
+               {"43.213.53.83", "e56f6cbf5b7c4be0237986d5243b87aa6d51305a", true},
+               // spec[0] with one of the rand() bytes changed. Valid.
+               {"124.31.75.21", "5fbfbff10c5d7a4ec8a88e4c6ab4c28b95eee401", true},
+               // spec[1] with the 21st leading bit changed. Not Valid.
+               {"21.75.31.124", "5a3ce1c14e7a08645677bbd1cfe7d8f956d53256", false},
+               // spec[2] with the 22nd leading bit changed. Valid.
+               {"65.23.51.170", "a5d43620bc8f112a3d426c84764f8c2a1150e616", true},
+               // spec[3] with the 4th last bit changed. Valid.
+               {"84.124.73.14", "1b0321dd1bb1fe518101ceef99462b947a01fe01", true},
+               // spec[4] with the 3rd last bit changed. Not valid.
+               {"43.213.53.83", "e56f6cbf5b7c4be0237986d5243b87aa6d51303e", false},
+       } {
+               ip := net.ParseIP(case_.ipStr)
+               id, err := hex.DecodeString(case_.nodeIDHex)
+               if err != nil {
+                       t.Fatal(err)
+               }
+               secure := nodeIdSecure(string(id), ip)
+               if secure != case_.valid {
+                       t.Fatalf("case failed: %v", case_)
+               }
+               if !secure {
+                       secureNodeId(id, ip)
+                       if !nodeIdSecure(string(id), ip) {
+                               t.Fatal("failed to secure node id")
+                       }
+               }
+       }
+}
+
+func TestServerDefaultNodeIdSecure(t *testing.T) {
+       s, err := NewServer(nil)
+       if err != nil {
+               t.Fatal(err)
+       }
+       defer s.Close()
+       if !nodeIdSecure(s.ID(), util.AddrIP(s.Addr())) {
+               t.Fatal("not secure")
+       }
+}
diff --git a/dht/expvar.go b/dht/expvar.go
new file mode 100644 (file)
index 0000000..af31d08
--- /dev/null
@@ -0,0 +1,12 @@
+package dht
+
+import (
+       "expvar"
+)
+
+var (
+       read               = expvar.NewInt("dhtRead")
+       readBlocked        = expvar.NewInt("dhtReadBlocked")
+       readUnmarshalError = expvar.NewInt("dhtReadUnmarshalError")
+       readQuery          = expvar.NewInt("dhtReadQuery")
+)
diff --git a/doc.go b/doc.go
new file mode 100644 (file)
index 0000000..a15349e
--- /dev/null
+++ b/doc.go
@@ -0,0 +1,27 @@
+/*
+Package torrent implements a torrent client. Goals include:
+ * Configurable data storage, such as file, mmap, and piece-based.
+ * Downloading on demand: torrent.Reader will request only the data required to
+   satisfy Reads, which is ideal for streaming and torrentfs.
+
+BitTorrent features implemented include:
+ * Protocol obfuscation
+ * DHT
+ * uTP
+ * PEX
+ * Magnet
+ * IP Blocklists
+ * Some IPv6
+ * UDP Trackers
+
+ConfigDir
+
+A Client has a configurable ConfigDir that defaults to $HOME/.config/torrent.
+Torrent metainfo files are cached at $CONFIGDIR/torrents/$infohash.torrent.
+Infohashes in $CONFIGDIR/banned_infohashes cannot be added to the Client. A
+P2P Plaintext Format blocklist is loaded from a file at the location specified
+by the environment variable TORRENT_BLOCKLIST_FILE if set, otherwise from
+$CONFIGDIR/blocklist.
+
+*/
+package torrent
diff --git a/example_test.go b/example_test.go
new file mode 100644 (file)
index 0000000..9dc7502
--- /dev/null
@@ -0,0 +1,30 @@
+package torrent_test
+
+import (
+       "io"
+       "log"
+
+       "github.com/anacrolix/torrent"
+)
+
+func Example() {
+       c, _ := torrent.NewClient(nil)
+       defer c.Close()
+       t, _ := c.AddMagnet("magnet:?xt=urn:btih:ZOCMZQIPFFW7OLLMIC5HUB6BPCSDEOQU")
+       <-t.GotInfo()
+       t.DownloadAll()
+       c.WaitAll()
+       log.Print("ermahgerd, torrent downloaded")
+}
+
+func Example_fileReader() {
+       var (
+               t torrent.Torrent
+               f torrent.File
+       )
+       r := t.NewReader()
+       defer r.Close()
+       // Access the parts of the torrent pertaining to f. Data will be
+       // downloaded as required, per the configuration of the torrent.Reader.
+       _ = io.NewSectionReader(r, f.Offset(), f.Length())
+}
diff --git a/file.go b/file.go
new file mode 100644 (file)
index 0000000..688fc2f
--- /dev/null
+++ b/file.go
@@ -0,0 +1,67 @@
+package torrent
+
+import "github.com/anacrolix/torrent/metainfo"
+
+// Provides access to regions of torrent data that correspond to its files.
+type File struct {
+       t      Torrent
+       path   string
+       offset int64
+       length int64
+       fi     metainfo.FileInfo
+}
+
+// Data for this file begins this far into the torrent.
+func (f *File) Offset() int64 {
+       return f.offset
+}
+
+func (f File) FileInfo() metainfo.FileInfo {
+       return f.fi
+}
+
+func (f File) Path() string {
+       return f.path
+}
+
+func (f *File) Length() int64 {
+       return f.length
+}
+
+type FilePieceState struct {
+       Bytes int64 // Bytes within the piece that are part of this File.
+       PieceState
+}
+
+// Returns the state of pieces in this file.
+func (f *File) State() (ret []FilePieceState) {
+       f.t.cl.mu.Lock()
+       defer f.t.cl.mu.Unlock()
+       pieceSize := int64(f.t.usualPieceSize())
+       off := f.offset % pieceSize
+       remaining := f.length
+       for i := int(f.offset / pieceSize); ; i++ {
+               if remaining == 0 {
+                       break
+               }
+               len1 := pieceSize - off
+               if len1 > remaining {
+                       len1 = remaining
+               }
+               ret = append(ret, FilePieceState{len1, f.t.pieceState(i)})
+               off = 0
+               remaining -= len1
+       }
+       return
+}
+
+func (f *File) PrioritizeRegion(off, len int64) {
+       if off < 0 || off >= f.length {
+               return
+       }
+       if off+len > f.length {
+               len = f.length - off
+       }
+       off += f.offset
+       f.t.SetRegionPriority(off, len)
+}
diff --git a/fs/TODO b/fs/TODO
new file mode 100644 (file)
index 0000000..9ab12b5
--- /dev/null
+++ b/fs/TODO
@@ -0,0 +1 @@
+ * Reinstate InitAsyncRead, or find out if it's worth it. Upstream made it a PITA to apply it automatically.
index 270bf3e47258f32d354b019e83801fa4cfe0ebeb..0c83974e60cbd76be05d8c5551651d863dff6ff2 100644 (file)
@@ -7,14 +7,13 @@ import (
        "os"
        "strings"
        "sync"
-       "time"
 
        "bazil.org/fuse"
        fusefs "bazil.org/fuse/fs"
-       "github.com/anacrolix/libtorgo/metainfo"
        "golang.org/x/net/context"
 
        "github.com/anacrolix/torrent"
+       "github.com/anacrolix/torrent/metainfo"
 )
 
 const (
@@ -37,18 +36,11 @@ type TorrentFS struct {
 
 var (
        _ fusefs.FSDestroyer = &TorrentFS{}
-       _ fusefs.FSIniter    = &TorrentFS{}
-)
-
-func (fs *TorrentFS) Init(ctx context.Context, req *fuse.InitRequest, resp *fuse.InitResponse) error {
-       log.Print(req)
-       log.Print(resp)
-       resp.MaxReadahead = req.MaxReadahead
-       resp.Flags |= fuse.InitAsyncRead
-       return nil
-}
 
-var _ fusefs.NodeForgetter = rootNode{}
+       _ fusefs.NodeForgetter      = rootNode{}
+       _ fusefs.HandleReadDirAller = rootNode{}
+       _ fusefs.HandleReadDirAller = dirNode{}
+)
 
 type rootNode struct {
        fs *TorrentFS
@@ -67,10 +59,10 @@ type fileNode struct {
        TorrentOffset int64
 }
 
-func (fn fileNode) Attr(attr *fuse.Attr) {
+func (fn fileNode) Attr(ctx context.Context, attr *fuse.Attr) error {
        attr.Size = fn.size
        attr.Mode = defaultMode
-       return
+       return nil
 }
 
 func (n *node) fsPath() string {
@@ -88,7 +80,10 @@ func blockingRead(ctx context.Context, fs *TorrentFS, t torrent.Torrent, off int
        )
        readDone := make(chan struct{})
        go func() {
-               _n, _err = t.ReadAt(p, off)
+               r := t.NewReader()
+               defer r.Close()
+               _n, _err = r.ReadAt(p, off)
+               log.Println(_n, p)
                close(readDone)
        }()
        select {
@@ -123,17 +118,9 @@ func readFull(ctx context.Context, fs *TorrentFS, t torrent.Torrent, off int64,
 
 func (fn fileNode) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
        torrentfsReadRequests.Add(1)
-       started := time.Now()
        if req.Dir {
                panic("read on directory")
        }
-       defer func() {
-               ms := time.Now().Sub(started).Nanoseconds() / 1000000
-               if ms < 20 {
-                       return
-               }
-               log.Printf("torrentfs read took %dms", ms)
-       }()
        size := req.Size
        fileLeft := int64(fn.size) - req.Offset
        if fileLeft < 0 {
@@ -232,23 +219,24 @@ func (dn dirNode) Lookup(ctx context.Context, name string) (_node fusefs.Node, e
        return
 }
 
-func (dn dirNode) Attr(attr *fuse.Attr) {
+func (dn dirNode) Attr(ctx context.Context, attr *fuse.Attr) error {
        attr.Mode = os.ModeDir | defaultMode
-       return
+       return nil
 }
 
 func (me rootNode) Lookup(ctx context.Context, name string) (_node fusefs.Node, err error) {
        for _, t := range me.fs.Client.Torrents() {
-               if t.Name() != name || t.Info == nil {
+               info := t.Info()
+               if t.Name() != name || info == nil {
                        continue
                }
                __node := node{
-                       metadata: t.Info,
+                       metadata: info,
                        FS:       me.fs,
                        t:        t,
                }
-               if !t.Info.IsDir() {
-                       _node = fileNode{__node, uint64(t.Info.Length), 0}
+               if !info.IsDir() {
+                       _node = fileNode{__node, uint64(info.Length), 0}
                } else {
                        _node = dirNode{__node}
                }
@@ -260,15 +248,16 @@ func (me rootNode) Lookup(ctx context.Context, name string) (_node fusefs.Node,
        return
 }
 
-func (me rootNode) ReadDir(ctx context.Context) (dirents []fuse.Dirent, err error) {
+func (me rootNode) ReadDirAll(ctx context.Context) (dirents []fuse.Dirent, err error) {
        for _, t := range me.fs.Client.Torrents() {
-               if t.Info == nil {
+               info := t.Info()
+               if info == nil {
                        continue
                }
                dirents = append(dirents, fuse.Dirent{
-                       Name: t.Info.Name,
+                       Name: info.Name,
                        Type: func() fuse.DirentType {
-                               if !t.Info.IsDir() {
+                               if !info.IsDir() {
                                        return fuse.DT_File
                                } else {
                                        return fuse.DT_Dir
@@ -279,8 +268,9 @@ func (me rootNode) ReadDir(ctx context.Context) (dirents []fuse.Dirent, err erro
        return
 }
 
-func (rootNode) Attr(attr *fuse.Attr) {
+func (rootNode) Attr(ctx context.Context, attr *fuse.Attr) error {
        attr.Mode = os.ModeDir
+       return nil
 }
 
 // TODO(anacrolix): Why should rootNode implement this?
index f45b7d8173052e84e0659237a2484cd1bbd89c7a..c14cb21c8fa9e1d38487ca7fa92ea1cfa3677dbc 100644 (file)
@@ -4,6 +4,7 @@ import (
        "bytes"
        "fmt"
        "io/ioutil"
+       "log"
        "net"
        "net/http"
        _ "net/http/pprof"
@@ -16,16 +17,20 @@ import (
 
        "bazil.org/fuse"
        fusefs "bazil.org/fuse/fs"
-       "github.com/anacrolix/libtorgo/metainfo"
-       "golang.org/x/net/context"
+       netContext "golang.org/x/net/context"
 
        "github.com/anacrolix/torrent"
        "github.com/anacrolix/torrent/data"
        "github.com/anacrolix/torrent/data/mmap"
        "github.com/anacrolix/torrent/internal/testutil"
+       "github.com/anacrolix/torrent/metainfo"
        "github.com/anacrolix/torrent/util"
 )
 
+func init() {
+       log.SetFlags(log.Flags() | log.Lshortfile)
+}
+
 func TestTCPAddrString(t *testing.T) {
        l, err := net.Listen("tcp4", "localhost:0")
        if err != nil {
@@ -92,31 +97,37 @@ func TestUnmountWedged(t *testing.T) {
                DataDir:         filepath.Join(layout.BaseDir, "incomplete"),
                DisableTrackers: true,
                NoDHT:           true,
+               ListenAddr:      "redonk",
+               DisableTCP:      true,
+               DisableUTP:      true,
 
                NoDefaultBlocklist: true,
        })
+       if err != nil {
+               t.Fatal(err)
+       }
        defer client.Close()
        client.AddTorrent(layout.Metainfo)
        fs := New(client)
        fuseConn, err := fuse.Mount(layout.MountDir)
        if err != nil {
-               if strings.Contains(err.Error(), "fuse") {
-                       t.Skip(err)
+               msg := fmt.Sprintf("error mounting: %s", err)
+               if strings.Contains(err.Error(), "fuse") || err.Error() == "exit status 71" {
+                       t.Skip(msg)
                }
-               t.Fatal(err)
+               t.Fatal(msg)
        }
        go func() {
-               server := fusefs.Server{
-                       FS: fs,
+               server := fusefs.New(fuseConn, &fusefs.Config{
                        Debug: func(msg interface{}) {
                                t.Log(msg)
                        },
-               }
-               server.Serve(fuseConn)
+               })
+               server.Serve(fs)
        }()
        <-fuseConn.Ready
        if err := fuseConn.MountError; err != nil {
-               t.Fatal(err)
+               t.Fatalf("mount error: %s", err)
        }
        // Read the greeting file, though it will never be available. This should
        // "wedge" FUSE, requiring the fs object to be forcibly destroyed. The
@@ -164,6 +175,7 @@ func TestDownloadOnDemand(t *testing.T) {
                DisableTrackers: true,
                NoDHT:           true,
                ListenAddr:      ":0",
+               Seed:            true,
 
                NoDefaultBlocklist: true,
                // Ensure that the metainfo is obtained over the wire, since we added
@@ -226,18 +238,18 @@ func TestDownloadOnDemand(t *testing.T) {
        fs := New(leecher)
        defer fs.Destroy()
        root, _ := fs.Root()
-       node, _ := root.(fusefs.NodeStringLookuper).Lookup(context.Background(), "greeting")
+       node, _ := root.(fusefs.NodeStringLookuper).Lookup(netContext.Background(), "greeting")
        var attr fuse.Attr
-       node.Attr(&attr)
+       node.Attr(netContext.Background(), &attr)
        size := attr.Size
        resp := &fuse.ReadResponse{
                Data: make([]byte, size),
        }
-       node.(fusefs.HandleReader).Read(context.Background(), &fuse.ReadRequest{
+       node.(fusefs.HandleReader).Read(netContext.Background(), &fuse.ReadRequest{
                Size: int(size),
        }, resp)
        content := resp.Data
        if string(content) != testutil.GreetingFileContents {
-               t.FailNow()
+               t.Fatalf("%q != %q", string(content), testutil.GreetingFileContents)
        }
 }
index d26cbdcd2d1b55ae09254be760e74fc1d08e48a8..169ebbc7ee2ef50356c7d679ff14ffdb5fa0690f 100644 (file)
@@ -10,7 +10,11 @@ import (
 
 // Maintains piece integers by their ascending assigned keys.
 type Instance struct {
-       sl        *skiplist.SkipList
+       // Contains the ascending priority keys. The keys contain a slice of piece
+       // indices.
+       sl *skiplist.SkipList
+       // Maps from piece index back to its key, so that it can be remove
+       // efficiently from the skip list.
        pieceKeys map[int]int
 }
 
@@ -41,6 +45,7 @@ func (me *Instance) SetPiece(piece, key int) {
        me.shuffleItem(key)
 }
 
+// Shuffle the piece indices that share a given key.
 func (me *Instance) shuffleItem(key int) {
        _item, ok := me.sl.Get(key)
        if !ok {
index af1230f6f0f8bfff82efc5afe22c9b2a6e80d34a..552f9a8eb39a555a6f7d3bed51a5dca80df823f1 100644 (file)
@@ -12,7 +12,7 @@ import (
        "os"
        "path/filepath"
 
-       "github.com/anacrolix/libtorgo/metainfo"
+       "github.com/anacrolix/torrent/metainfo"
 )
 
 const GreetingFileContents = "hello, world\n"
index 93fb9b31ad8bfb7a63e5bcbf90eb18d4d2b04c5b..18d6b0a383c4a8455161fa355b2bcf6d28652acc 100644 (file)
@@ -1,9 +1,13 @@
+// Package iplist handles the P2P Plaintext Format described by
+// https://en.wikipedia.org/wiki/PeerGuardian#P2P_plaintext_format.
 package iplist
 
 import (
+       "bufio"
        "bytes"
        "errors"
        "fmt"
+       "io"
        "net"
        "sort"
 )
@@ -123,3 +127,36 @@ func ParseBlocklistP2PLine(l []byte) (r Range, ok bool, err error) {
        ok = true
        return
 }
+
+// Creates an IPList from a line-delimited P2P Plaintext file.
+func NewFromReader(f io.Reader) (ret *IPList, err error) {
+       var ranges []Range
+       // There's a lot of similar descriptions, so we maintain a pool and reuse
+       // them to reduce memory overhead.
+       uniqStrs := make(map[string]string)
+       scanner := bufio.NewScanner(f)
+       lineNum := 1
+       for scanner.Scan() {
+               r, ok, lineErr := ParseBlocklistP2PLine(scanner.Bytes())
+               if lineErr != nil {
+                       err = fmt.Errorf("error parsing line %d: %s", lineNum, lineErr)
+                       return
+               }
+               lineNum++
+               if !ok {
+                       continue
+               }
+               if s, ok := uniqStrs[r.Description]; ok {
+                       r.Description = s
+               } else {
+                       uniqStrs[r.Description] = r.Description
+               }
+               ranges = append(ranges, r)
+       }
+       err = scanner.Err()
+       if err != nil {
+               return
+       }
+       ret = New(ranges)
+       return
+}
index 14ceebd0e3292c37bb1aae70ca5ec1aca3c81aef..52a3abbf7be470d713b7ed7950ca6ca16434cdf0 100644 (file)
@@ -41,7 +41,7 @@ func TestParseMagnetURI(t *testing.T) {
        // Checking if the magnet instance struct is built correctly from parsing
        m, err = ParseMagnetURI(exampleMagnetURI)
        if err != nil || !reflect.DeepEqual(exampleMagnet, m) {
-               t.Errorf("ParseMagnetURI(%e) returned %v, expected %v", uri, m, exampleMagnet)
+               t.Errorf("ParseMagnetURI(%s) returned %v, expected %v", uri, m, exampleMagnet)
        }
 
        // empty string URI case
diff --git a/metainfo/README b/metainfo/README
new file mode 100644 (file)
index 0000000..6da37b8
--- /dev/null
@@ -0,0 +1 @@
+A library for manipulating ".torrent" files.
diff --git a/metainfo/_testdata/23516C72685E8DB0C8F15553382A927F185C4F01.torrent b/metainfo/_testdata/23516C72685E8DB0C8F15553382A927F185C4F01.torrent
new file mode 100644 (file)
index 0000000..492908c
Binary files /dev/null and b/metainfo/_testdata/23516C72685E8DB0C8F15553382A927F185C4F01.torrent differ
diff --git a/metainfo/_testdata/archlinux-2011.08.19-netinstall-i686.iso.torrent b/metainfo/_testdata/archlinux-2011.08.19-netinstall-i686.iso.torrent
new file mode 100644 (file)
index 0000000..9ce7748
Binary files /dev/null and b/metainfo/_testdata/archlinux-2011.08.19-netinstall-i686.iso.torrent differ
diff --git a/metainfo/_testdata/continuum.torrent b/metainfo/_testdata/continuum.torrent
new file mode 100644 (file)
index 0000000..ac15b75
Binary files /dev/null and b/metainfo/_testdata/continuum.torrent differ
diff --git a/metainfo/_testdata/trackerless.torrent b/metainfo/_testdata/trackerless.torrent
new file mode 100644 (file)
index 0000000..b90f253
--- /dev/null
@@ -0,0 +1 @@
+d7:comment19:This is just a test10:created by12:Johnny Bravo13:creation datei1430648794e8:encoding5:UTF-84:infod6:lengthi1128e4:name12:testfile.bin12:piece lengthi32768e6:pieces20:Õ\88ë        =\91U\8cäiÃŽ^æ °Eâ?ÇÒe5:nodesll35:udp://tracker.openbittorrent.com:8035:udp://tracker.openbittorrent.com:80eee
\ No newline at end of file
diff --git a/metainfo/builder.go b/metainfo/builder.go
new file mode 100644 (file)
index 0000000..96df193
--- /dev/null
@@ -0,0 +1,602 @@
+package metainfo
+
+import (
+       "crypto/sha1"
+       "errors"
+       "hash"
+       "io"
+       "os"
+       "path/filepath"
+       "sort"
+       "time"
+
+       "github.com/anacrolix/torrent/bencode"
+)
+
+//----------------------------------------------------------------------------
+// Build
+//----------------------------------------------------------------------------
+
+// The Builder type is responsible for .torrent files construction. Just
+// instantiate it, call necessary methods and then call the .Build method. While
+// waiting for completion you can use 'status' channel to get status reports.
+type Builder struct {
+       batch_state
+       filesmap map[string]bool
+}
+
+// Adds a file to the builder queue. You may add one or more files.
+func (b *Builder) AddFile(filename string) {
+       if b.filesmap == nil {
+               b.filesmap = make(map[string]bool)
+       }
+
+       filename, err := filepath.Abs(filename)
+       if err != nil {
+               panic(err)
+       }
+       b.filesmap[filename] = true
+}
+
+// Defines a name of the future torrent file. For single file torrents it's the
+// recommended name of the contained file. For multiple files torrents it's the
+// recommended name of the directory in which all of them will be
+// stored. Calling this function is not required. In case if no name was
+// specified, the builder will try to automatically assign it. It will use the
+// name of the file if there is only one file in the queue or it will try to
+// find the rightmost common directory of all the queued files and use its name as
+// a torrent name. In case if name cannot be assigned automatically, it will use
+// "unknown" as a torrent name.
+func (b *Builder) SetName(name string) {
+       b.name = name
+}
+
+// Sets the length of a piece in the torrent file in bytes. The default is
+// 256kb.
+func (b *Builder) SetPieceLength(length int64) {
+       b.piece_length = length
+}
+
+// Sets the "private" flag. The default is false.
+func (b *Builder) SetPrivate(v bool) {
+       b.private = v
+}
+
+// Add announce URL group. TODO: better explanation.
+func (b *Builder) AddAnnounceGroup(group []string) {
+       b.announce_list = append(b.announce_list, group)
+}
+
+// Add DHT nodes URLs for trackerless mode
+func (b *Builder) AddDhtNodes(group []string) {
+       b.node_list = append(b.node_list, group)
+}
+
+// Sets creation date. The default is time.Now() when the .Build method was
+// called.
+func (b *Builder) SetCreationDate(date time.Time) {
+       b.creation_date = date
+}
+
+// Sets the comment. The default is no comment.
+func (b *Builder) SetComment(comment string) {
+       b.comment = comment
+}
+
+// Sets the "created by" parameter. The default is "libtorgo".
+func (b *Builder) SetCreatedBy(createdby string) {
+       b.created_by = createdby
+}
+
+// Sets the "encoding" parameter. The default is "UTF-8".
+func (b *Builder) SetEncoding(encoding string) {
+       b.encoding = encoding
+}
+
+// Add WebSeed URL to the list.
+func (b *Builder) AddWebSeedURL(url string) {
+       b.urls = append(b.urls, url)
+}
+
+// Finalizes the Builder state and makes a Batch out of it. After calling that
+// method, Builder becomes empty and you can use it to create another Batch if
+// you will.
+func (b *Builder) Submit() (*Batch, error) {
+       err := b.check_parameters()
+       if err != nil {
+               return nil, err
+       }
+       b.set_defaults()
+
+       batch := &Batch{
+               batch_state: b.batch_state,
+       }
+
+       const non_regular = os.ModeDir | os.ModeSymlink |
+               os.ModeDevice | os.ModeNamedPipe | os.ModeSocket
+
+       // convert a map to a slice, calculate sizes and split paths
+       batch.total_size = 0
+       batch.files = make([]file, 0, 10)
+       for f, _ := range b.filesmap {
+               var file file
+               fi, err := os.Stat(f)
+               if err != nil {
+                       return nil, err
+               }
+
+               if fi.Mode()&non_regular != 0 {
+                       return nil, errors.New(f + " is not a regular file")
+               }
+
+               file.abspath = f
+               file.splitpath = split_path(f)
+               file.size = fi.Size()
+               batch.files = append(batch.files, file)
+               batch.total_size += file.size
+       }
+
+       // find the rightmost common directory
+       if len(batch.files) == 1 {
+               sp := batch.files[0].splitpath
+               batch.default_name = sp[len(sp)-1]
+       } else {
+               common := batch.files[0].splitpath
+               for _, f := range batch.files {
+                       if len(common) > len(f.splitpath) {
+                               common = common[:len(f.splitpath)]
+                       }
+
+                       for i, n := 0, len(common); i < n; i++ {
+                               if common[i] != f.splitpath[i] {
+                                       common = common[:i]
+                                       break
+                               }
+                       }
+
+                       if len(common) == 0 {
+                               break
+                       }
+               }
+
+               if len(common) == 0 {
+                       return nil, errors.New("no common rightmost folder was found for a set of queued files")
+               }
+
+               // found the common folder, let's strip that part from splitpath
+               // and setup the default name
+               batch.default_name = common[len(common)-1]
+
+               lcommon := len(common)
+               for i := range batch.files {
+                       f := &batch.files[i]
+                       f.splitpath = f.splitpath[lcommon:]
+               }
+
+               // and finally sort the files
+               sort.Sort(file_slice(batch.files))
+       }
+
+       // reset the builder state
+       b.batch_state = batch_state{}
+       b.filesmap = nil
+
+       return batch, nil
+}
+
+func (b *Builder) set_defaults() {
+       if b.piece_length == 0 {
+               b.piece_length = 256 * 1024
+       }
+
+       if b.creation_date.IsZero() {
+               b.creation_date = time.Now()
+       }
+
+       if b.created_by == "" {
+               b.created_by = "libtorgo"
+       }
+
+       if b.encoding == "" {
+               b.encoding = "UTF-8"
+       }
+}
+
+func (b *Builder) check_parameters() error {
+       // should be at least one file
+       if len(b.filesmap) == 0 {
+               return errors.New("no files were queued")
+       }
+
+       // let's clean up the announce_list and node_list
+       b.announce_list = cleanUpLists(b.announce_list)
+       b.node_list = cleanUpLists(b.node_list)
+
+       if len(b.announce_list) == 0 && len(b.node_list) == 0 {
+               return errors.New("no announce group or DHT nodes specified")
+       }
+
+       // Either the node_list or announce_list can be present
+       // Never the both!
+       if len(b.announce_list) > 0 && len(b.node_list) > 0 {
+               return errors.New("announce group and nodes are mutually exclusive")
+       }
+
+       // and clean up the urls
+       b.urls = remove_empty_strings(b.urls)
+
+       return nil
+}
+
+func cleanUpLists(list [][]string) [][]string {
+       newList := make([][]string, 0, len(list))
+       for _, l := range list {
+               l = remove_empty_strings(l)
+
+               // discard empty announce groups
+               if len(l) == 0 {
+                       continue
+               }
+               newList = append(newList, l)
+       }
+       return newList
+}
+
+//----------------------------------------------------------------------------
+// Batch
+//----------------------------------------------------------------------------
+
+// Batch represents a snapshot of a builder state, ready for transforming it
+// into a torrent file. Note that Batch contains two accessor methods you might
+// be interested in. The TotalSize is the total size of all the files queued for
+// hashing, you will use it for status reporting. The DefaultName is an
+// automatically determined name of the torrent metainfo, you might want to use
+// it for naming the .torrent file itself.
+type Batch struct {
+       batch_state
+       files        []file
+       total_size   int64
+       default_name string
+}
+
+// Get a total size of all the files queued for hashing. Useful in conjunction
+// with status reports.
+func (b *Batch) TotalSize() int64 {
+       return b.total_size
+}
+
+// Get an automatically determined name of the future torrent metainfo. You can
+// use it for a .torrent file in case user hasn't provided it specifically.
+func (b *Batch) DefaultName() string {
+       return b.default_name
+}
+
+// Starts a process of building the torrent file. This function does everything
+// in a separate goroutine and uses up to 'nworkers' of goroutines to perform
+// SHA1 hashing. Therefore it will return almost immedately. It returns two
+// channels, the first one is for completion awaiting, the second one is for
+// getting status reports. Status report is a number of bytes hashed, you can
+// get the total amount of bytes by inspecting the Batch.TotalSize method return
+// value.
+func (b *Batch) Start(w io.Writer, nworkers int) (<-chan error, <-chan int64) {
+       if nworkers <= 0 {
+               nworkers = 1
+       }
+
+       completion := make(chan error)
+       status := make(chan int64)
+
+       go func() {
+               // prepare workers
+               workers := make([]*worker, nworkers)
+               free_workers := make(chan *worker, nworkers)
+               for i := 0; i < nworkers; i++ {
+                       workers[i] = new_worker(free_workers)
+               }
+               stop_workers := func() {
+                       for _, w := range workers {
+                               w.stop()
+                       }
+                       for _, w := range workers {
+                               w.wait_for_stop()
+                       }
+               }
+
+               // prepare files for reading
+               fr := files_reader{files: b.files}
+               npieces := b.total_size/b.piece_length + 1
+               b.pieces = make([]byte, 20*npieces)
+               hashed := int64(0)
+
+               // read all the pieces passing them to workers for hashing
+               var data []byte
+               for i := int64(0); i < npieces; i++ {
+                       if data == nil {
+                               data = make([]byte, b.piece_length)
+                       }
+
+                       nr, err := fr.Read(data)
+                       if err != nil {
+                               // EOF is not an eror if it was the last piece
+                               if err == io.EOF {
+                                       if i != npieces-1 {
+                                               stop_workers()
+                                               completion <- err
+                                               return
+                                       }
+                               } else {
+                                       stop_workers()
+                                       completion <- err
+                                       return
+                               }
+                       }
+
+                       // cut the data slice to the amount of actual data read
+                       data = data[:nr]
+                       w := <-free_workers
+                       data = w.queue(data, b.pieces[20*i:20*i+20])
+
+                       // update and try to send the status report
+                       if data != nil {
+                               hashed += int64(len(data))
+                               data = data[:cap(data)]
+
+                               select {
+                               case status <- hashed:
+                               default:
+                               }
+                       }
+               }
+               stop_workers()
+
+               // at this point the hash was calculated and we're ready to
+               // write the torrent file
+               err := b.write_torrent(w)
+               if err != nil {
+                       completion <- err
+                       return
+               }
+               completion <- nil
+       }()
+       return completion, status
+}
+
+func (b *Batch) write_torrent(w io.Writer) error {
+       var td MetaInfo
+
+       // Either announce or node lists are allowed - not both
+       if len(b.announce_list) != 0 {
+               td.Announce = b.announce_list[0][0]
+               if len(b.announce_list) != 1 || len(b.announce_list[0]) != 1 {
+                       td.AnnounceList = b.announce_list
+               }
+       }
+
+       if len(b.node_list) != 0 {
+               td.Nodes = b.node_list
+       }
+
+       td.CreationDate = b.creation_date.Unix()
+       td.Comment = b.comment
+       td.CreatedBy = b.created_by
+       td.Encoding = b.encoding
+       switch {
+       case len(b.urls) == 0:
+       case len(b.urls) == 1:
+               td.URLList = b.urls[0]
+       default:
+               td.URLList = b.urls
+       }
+
+       td.Info.PieceLength = b.piece_length
+       td.Info.Pieces = b.pieces
+       if b.name == "" {
+               td.Info.Name = b.default_name
+       } else {
+               td.Info.Name = b.name
+       }
+       if len(b.files) == 1 {
+               td.Info.Length = b.files[0].size
+       } else {
+               td.Info.Files = make([]FileInfo, len(b.files))
+               for i, f := range b.files {
+                       td.Info.Files[i] = FileInfo{
+                               Path:   f.splitpath,
+                               Length: f.size,
+                       }
+               }
+       }
+       td.Info.Private = b.private
+
+       e := bencode.NewEncoder(w)
+       return e.Encode(&td)
+}
+
+//----------------------------------------------------------------------------
+// misc stuff
+//----------------------------------------------------------------------------
+
+// splits path into components (dirs and files), works only on absolute paths
+func split_path(path string) []string {
+       var dir, file string
+       s := make([]string, 0, 5)
+
+       dir = path
+       for {
+               dir, file = filepath.Split(filepath.Clean(dir))
+               if file == "" {
+                       break
+               }
+               s = append(s, file)
+       }
+
+       // reverse the slice
+       for i, n := 0, len(s)/2; i < n; i++ {
+               i2 := len(s) - i - 1
+               s[i], s[i2] = s[i2], s[i]
+       }
+
+       return s
+}
+
+// just a common data between the Builder and the Batch
+type batch_state struct {
+       name          string
+       piece_length  int64
+       pieces        []byte
+       private       bool
+       announce_list [][]string
+       node_list     [][]string
+       creation_date time.Time
+       comment       string
+       created_by    string
+       encoding      string
+       urls          []string
+}
+
+type file struct {
+       abspath   string
+       splitpath []string
+       size      int64
+}
+
+type file_slice []file
+
+func (s file_slice) Len() int           { return len(s) }
+func (s file_slice) Less(i, j int) bool { return s[i].abspath < s[j].abspath }
+func (s file_slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+func remove_empty_strings(slice []string) []string {
+       j := 0
+       for i, n := 0, len(slice); i < n; i++ {
+               if slice[i] == "" {
+                       continue
+               }
+               slice[j] = slice[i]
+               j++
+       }
+       return slice[:j]
+}
+
+//----------------------------------------------------------------------------
+// worker
+//----------------------------------------------------------------------------
+
+type worker struct {
+       msgbox chan bool
+       hash   hash.Hash
+
+       // request
+       sha1 []byte
+       data []byte
+}
+
+// returns existing 'data'
+func (w *worker) queue(data, sha1 []byte) []byte {
+       d := w.data
+       w.data = data
+       w.sha1 = sha1
+       w.msgbox <- false
+       return d
+}
+
+func (w *worker) stop() {
+       w.msgbox <- true
+}
+
+func (w *worker) wait_for_stop() {
+       <-w.msgbox
+}
+
+func new_worker(out chan<- *worker) *worker {
+       w := &worker{
+               msgbox: make(chan bool),
+               hash:   sha1.New(),
+       }
+       go func() {
+               var sha1 [20]byte
+               for {
+                       if <-w.msgbox {
+                               w.msgbox <- true
+                               return
+                       }
+                       w.hash.Reset()
+                       w.hash.Write(w.data)
+                       w.hash.Sum(sha1[:0])
+                       copy(w.sha1, sha1[:])
+                       out <- w
+               }
+       }()
+       out <- w
+       return w
+}
+
+//----------------------------------------------------------------------------
+// files_reader
+//----------------------------------------------------------------------------
+
+type files_reader struct {
+       files   []file
+       cur     int
+       curfile *os.File
+       off     int64
+}
+
+func (f *files_reader) Read(data []byte) (int, error) {
+       if f.cur >= len(f.files) {
+               return 0, io.EOF
+       }
+
+       if len(data) == 0 {
+               return 0, nil
+       }
+
+       read := 0
+       for len(data) > 0 {
+               file := &f.files[f.cur]
+               if f.curfile == nil {
+                       var err error
+                       f.curfile, err = os.Open(file.abspath)
+                       if err != nil {
+                               return read, err
+                       }
+               }
+
+               // we need to read up to 'len(data)' bytes from current file
+               n := int64(len(data))
+
+               // unless there is not enough data in this file
+               if file.size-f.off < n {
+                       n = file.size - f.off
+               }
+
+               // if there is no data in this file, try next one
+               if n == 0 {
+                       err := f.curfile.Close()
+                       if err != nil {
+                               return read, err
+                       }
+
+                       f.curfile = nil
+                       f.off = 0
+                       f.cur++
+                       if f.cur >= len(f.files) {
+                               return read, io.EOF
+                       }
+                       continue
+               }
+
+               // read, handle errors
+               nr, err := f.curfile.Read(data[:n])
+               read += nr
+               f.off += int64(nr)
+               if err != nil {
+                       return read, err
+               }
+
+               // ok, we've read nr bytes out of len(data), cut the data slice
+               data = data[nr:]
+       }
+
+       return read, nil
+}
diff --git a/metainfo/metainfo.go b/metainfo/metainfo.go
new file mode 100644 (file)
index 0000000..ce3246a
--- /dev/null
@@ -0,0 +1,155 @@
+package metainfo
+
+import (
+       "crypto/sha1"
+       "io"
+       "os"
+
+       "github.com/anacrolix/torrent/bencode"
+)
+
+// Information specific to a single file inside the MetaInfo structure.
+type FileInfo struct {
+       Length int64    `bencode:"length"`
+       Path   []string `bencode:"path"`
+}
+
+// Load a MetaInfo from an io.Reader. Returns a non-nil error in case of
+// failure.
+func Load(r io.Reader) (*MetaInfo, error) {
+       var mi MetaInfo
+       d := bencode.NewDecoder(r)
+       err := d.Decode(&mi)
+       if err != nil {
+               return nil, err
+       }
+       return &mi, nil
+}
+
+// Convenience function for loading a MetaInfo from a file.
+func LoadFromFile(filename string) (*MetaInfo, error) {
+       f, err := os.Open(filename)
+       if err != nil {
+               return nil, err
+       }
+       defer f.Close()
+       return Load(f)
+}
+
+// The info dictionary.
+type Info struct {
+       PieceLength int64      `bencode:"piece length"`
+       Pieces      []byte     `bencode:"pieces"`
+       Name        string     `bencode:"name"`
+       Length      int64      `bencode:"length,omitempty"`
+       Private     bool       `bencode:"private,omitempty"`
+       Files       []FileInfo `bencode:"files,omitempty"`
+}
+
+func (me *Info) TotalLength() (ret int64) {
+       if me.IsDir() {
+               for _, fi := range me.Files {
+                       ret += fi.Length
+               }
+       } else {
+               ret = me.Length
+       }
+       return
+}
+
+func (me *Info) NumPieces() int {
+       return len(me.Pieces) / 20
+}
+
+type Piece interface {
+       Hash() []byte
+       Length() int64
+       Offset() int64
+}
+
+type piece struct {
+       Info *Info
+       i    int
+}
+
+func (me piece) Length() int64 {
+       if me.i == me.Info.NumPieces()-1 {
+               return me.Info.TotalLength() - int64(me.i)*me.Info.PieceLength
+       }
+       return me.Info.PieceLength
+}
+
+func (me piece) Offset() int64 {
+       return int64(me.i) * me.Info.PieceLength
+}
+
+func (me piece) Hash() []byte {
+       return me.Info.Pieces[me.i*20 : (me.i+1)*20]
+}
+
+func (me *Info) Piece(i int) piece {
+       return piece{me, i}
+}
+
+func (i *Info) IsDir() bool {
+       return len(i.Files) != 0
+}
+
+// The files field, converted up from the old single-file in the parent info
+// dict if necessary. This is a helper to avoid having to conditionally handle
+// single and multi-file torrent infos.
+func (i *Info) UpvertedFiles() []FileInfo {
+       if len(i.Files) == 0 {
+               return []FileInfo{{
+                       Length: i.Length,
+                       // Callers should determine that Info.Name is the basename, and
+                       // thus a regular file.
+                       Path: nil,
+               }}
+       }
+       return i.Files
+}
+
+// The info dictionary with its hash and raw bytes exposed, as these are
+// important to Bittorrent.
+type InfoEx struct {
+       Info
+       Hash  []byte
+       Bytes []byte
+}
+
+var (
+       _ bencode.Marshaler   = InfoEx{}
+       _ bencode.Unmarshaler = &InfoEx{}
+)
+
+func (this *InfoEx) UnmarshalBencode(data []byte) error {
+       this.Bytes = make([]byte, 0, len(data))
+       this.Bytes = append(this.Bytes, data...)
+       h := sha1.New()
+       _, err := h.Write(this.Bytes)
+       if err != nil {
+               panic(err)
+       }
+       this.Hash = h.Sum(nil)
+       return bencode.Unmarshal(data, &this.Info)
+}
+
+func (this InfoEx) MarshalBencode() ([]byte, error) {
+       if this.Bytes != nil {
+               return this.Bytes, nil
+       }
+       return bencode.Marshal(&this.Info)
+}
+
+type MetaInfo struct {
+       Info         InfoEx      `bencode:"info"`
+       Announce     string      `bencode:"announce,omitempty"`
+       AnnounceList [][]string  `bencode:"announce-list,omitempty"`
+       Nodes        [][]string  `bencode:"nodes,omitempty"`
+       CreationDate int64       `bencode:"creation date,omitempty"`
+       Comment      string      `bencode:"comment,omitempty"`
+       CreatedBy    string      `bencode:"created by,omitempty"`
+       Encoding     string      `bencode:"encoding,omitempty"`
+       URLList      interface{} `bencode:"url-list,omitempty"`
+}
diff --git a/metainfo/metainfo_test.go b/metainfo/metainfo_test.go
new file mode 100644 (file)
index 0000000..16746c5
--- /dev/null
@@ -0,0 +1,47 @@
+package metainfo
+
+import (
+       "bytes"
+       "path"
+       "testing"
+
+       "github.com/anacrolix/torrent/bencode"
+)
+
+func test_file(t *testing.T, filename string) {
+       mi, err := LoadFromFile(filename)
+       if err != nil {
+               t.Fatal(err)
+       }
+
+       if len(mi.Info.Files) == 1 {
+               t.Logf("Single file: %s (length: %d)\n", mi.Info.Name, mi.Info.Files[0].Length)
+       } else {
+               t.Logf("Multiple files: %s\n", mi.Info.Name)
+               for _, f := range mi.Info.Files {
+                       t.Logf(" - %s (length: %d)\n", path.Join(f.Path...), f.Length)
+               }
+       }
+
+       for _, group := range mi.AnnounceList {
+               for _, tracker := range group {
+                       t.Logf("Tracker: %s\n", tracker)
+               }
+       }
+       // for _, url := range mi.WebSeedURLs {
+       //      t.Logf("URL: %s\n", url)
+       // }
+
+       b, err := bencode.Marshal(mi.Info)
+       if !bytes.Equal(b, mi.Info.Bytes) {
+               t.Logf("\n%q\n%q", b[len(b)-20:], mi.Info.Bytes[len(mi.Info.Bytes)-20:])
+               t.Fatal("encoded and decoded bytes don't match")
+       }
+}
+
+func TestFile(t *testing.T) {
+       test_file(t, "_testdata/archlinux-2011.08.19-netinstall-i686.iso.torrent")
+       test_file(t, "_testdata/continuum.torrent")
+       test_file(t, "_testdata/23516C72685E8DB0C8F15553382A927F185C4F01.torrent")
+       test_file(t, "_testdata/trackerless.torrent")
+}
diff --git a/misc.go b/misc.go
index 873d73adfc0a6f580bf5b7648364113ff31f2e3c..532d4a72b9477f009b2c7d1fecab03cf0a4db817 100644 (file)
--- a/misc.go
+++ b/misc.go
@@ -4,18 +4,20 @@ import (
        "crypto"
        "errors"
        "fmt"
-       "math/rand"
-       "sync"
        "time"
 
-       "github.com/anacrolix/torrent/peer_protocol"
+       "github.com/anacrolix/torrent/metainfo"
+       pp "github.com/anacrolix/torrent/peer_protocol"
 )
 
 const (
-       pieceHash          = crypto.SHA1
-       maxRequests        = 250        // Maximum pending requests we allow peers to send us.
-       chunkSize          = 0x4000     // 16KiB
-       bep20              = "-GT0000-" // Peer ID client identifier prefix
+       pieceHash   = crypto.SHA1
+       maxRequests = 250    // Maximum pending requests we allow peers to send us.
+       chunkSize   = 0x4000 // 16KiB
+       // Peer ID client identifier prefix. We'll update this occasionally to
+       // reflect changes to client behaviour that other clients may depend on.
+       // Also see `extendedHandshakeClientVersion`.
+       bep20              = "-GT0001-"
        nominalDialTimeout = time.Second * 30
        minDialTimeout     = 5 * time.Second
 )
@@ -33,60 +35,22 @@ func (ih *InfoHash) HexString() string {
        return fmt.Sprintf("%x", ih[:])
 }
 
-type piecePriority byte
-
-const (
-       piecePriorityNone piecePriority = iota
-       piecePriorityNormal
-       piecePriorityReadahead
-       piecePriorityNext
-       piecePriorityNow
-)
-
-type piece struct {
-       Hash              pieceSum
-       PendingChunkSpecs map[chunkSpec]struct{}
-       Hashing           bool
-       QueuedForHash     bool
-       EverHashed        bool
-       Event             sync.Cond
-       Priority          piecePriority
-}
-
-func (p *piece) shuffledPendingChunkSpecs() (css []chunkSpec) {
-       if len(p.PendingChunkSpecs) == 0 {
-               return
-       }
-       css = make([]chunkSpec, 0, len(p.PendingChunkSpecs))
-       for cs := range p.PendingChunkSpecs {
-               css = append(css, cs)
-       }
-       if len(css) <= 1 {
-               return
-       }
-       for i := range css {
-               j := rand.Intn(i + 1)
-               css[i], css[j] = css[j], css[i]
-       }
-       return
-}
-
-func lastChunkSpec(pieceLength peer_protocol.Integer) (cs chunkSpec) {
+func lastChunkSpec(pieceLength pp.Integer) (cs chunkSpec) {
        cs.Begin = (pieceLength - 1) / chunkSize * chunkSize
        cs.Length = pieceLength - cs.Begin
        return
 }
 
 type chunkSpec struct {
-       Begin, Length peer_protocol.Integer
+       Begin, Length pp.Integer
 }
 
 type request struct {
-       Index peer_protocol.Integer
+       Index pp.Integer
        chunkSpec
 }
 
-func newRequest(index, begin, length peer_protocol.Integer) request {
+func newRequest(index, begin, length pp.Integer) request {
        return request{index, chunkSpec{begin, length}}
 }
 
@@ -118,3 +82,42 @@ func super(child interface{}) (parent interface{}, ok bool) {
        ok = parent != nil
        return
 }
+
+// Return the request that would include the given offset into the torrent data.
+func torrentOffsetRequest(torrentLength, pieceSize, chunkSize, offset int64) (
+       r request, ok bool) {
+       if offset < 0 || offset >= torrentLength {
+               return
+       }
+       r.Index = pp.Integer(offset / pieceSize)
+       r.Begin = pp.Integer(offset % pieceSize / chunkSize * chunkSize)
+       r.Length = pp.Integer(chunkSize)
+       pieceLeft := pp.Integer(pieceSize - int64(r.Begin))
+       if r.Length > pieceLeft {
+               r.Length = pieceLeft
+       }
+       torrentLeft := torrentLength - int64(r.Index)*pieceSize - int64(r.Begin)
+       if int64(r.Length) > torrentLeft {
+               r.Length = pp.Integer(torrentLeft)
+       }
+       ok = true
+       return
+}
+
+func torrentRequestOffset(torrentLength, pieceSize int64, r request) (off int64) {
+       off = int64(r.Index)*pieceSize + int64(r.Begin)
+       if off < 0 || off >= torrentLength {
+               panic("invalid request")
+       }
+       return
+}
+
+func validateInfo(info *metainfo.Info) error {
+       if len(info.Pieces)%20 != 0 {
+               return errors.New("pieces has invalid length")
+       }
+       if int((info.TotalLength()+info.PieceLength-1)/info.PieceLength) != info.NumPieces() {
+               return errors.New("piece count and file lengths are at odds")
+       }
+       return nil
+}
diff --git a/misc_test.go b/misc_test.go
new file mode 100644 (file)
index 0000000..a8bd10f
--- /dev/null
@@ -0,0 +1,15 @@
+package torrent
+
+import . "gopkg.in/check.v1"
+
+func (suite) TestTorrentOffsetRequest(c *C) {
+       check := func(tl, ps, off int64, expected request, ok bool) {
+               req, _ok := torrentOffsetRequest(tl, ps, chunkSize, off)
+               c.Check(_ok, Equals, ok)
+               c.Check(req, Equals, expected)
+       }
+       check(13, 5, 0, newRequest(0, 0, 5), true)
+       check(13, 5, 3, newRequest(0, 0, 5), true)
+       check(13, 5, 11, newRequest(2, 0, 3), true)
+       check(13, 5, 13, request{}, false)
+}
index 5db9588536d37ab4c70ceef7edc675b27e376406..622c179a282633d9de37649198e12622102492e8 100644 (file)
@@ -4,9 +4,7 @@ type sizer interface {
        Size() int64
 }
 
-type (
-       span []sizer
-)
+type span []sizer
 
 func (me span) ApplyTo(off int64, f func(int64, sizer) (stop bool)) {
        for _, interval := range me {
diff --git a/piece.go b/piece.go
new file mode 100644 (file)
index 0000000..a5e9947
--- /dev/null
+++ b/piece.go
@@ -0,0 +1,84 @@
+package torrent
+
+import (
+       "math/rand"
+       "sync"
+
+       pp "github.com/anacrolix/torrent/peer_protocol"
+)
+
+// Piece priority describes the importance of obtaining a particular piece.
+
+type piecePriority byte
+
+const (
+       PiecePriorityNone      piecePriority = iota // Not wanted.
+       PiecePriorityNormal                         // Wanted.
+       PiecePriorityReadahead                      // May be required soon.
+       PiecePriorityNext                           // Succeeds a piece where a read occurred.
+       PiecePriorityNow                            // A read occurred in this piece.
+)
+
+type piece struct {
+       // The completed piece SHA1 hash, from the metainfo "pieces" field.
+       Hash pieceSum
+       // Chunks we don't have. The offset and length can be determined by the
+       // request chunkSize in use.
+       PendingChunkSpecs []bool
+       Hashing           bool
+       QueuedForHash     bool
+       EverHashed        bool
+       Event             sync.Cond
+       Priority          piecePriority
+}
+
+func (p *piece) pendingChunk(cs chunkSpec) bool {
+       if p.PendingChunkSpecs == nil {
+               return false
+       }
+       return p.PendingChunkSpecs[chunkIndex(cs)]
+}
+
+func (p *piece) numPendingChunks() (ret int) {
+       for _, pending := range p.PendingChunkSpecs {
+               if pending {
+                       ret++
+               }
+       }
+       return
+}
+
+func (p *piece) unpendChunkIndex(i int) {
+       if p.PendingChunkSpecs == nil {
+               return
+       }
+       p.PendingChunkSpecs[i] = false
+}
+
+func chunkIndexSpec(index int, pieceLength pp.Integer) chunkSpec {
+       ret := chunkSpec{pp.Integer(index) * chunkSize, chunkSize}
+       if ret.Begin+ret.Length > pieceLength {
+               ret.Length = pieceLength - ret.Begin
+       }
+       return ret
+}
+
+func (p *piece) shuffledPendingChunkSpecs(pieceLength pp.Integer) (css []chunkSpec) {
+       if p.numPendingChunks() == 0 {
+               return
+       }
+       css = make([]chunkSpec, 0, p.numPendingChunks())
+       for i, pending := range p.PendingChunkSpecs {
+               if pending {
+                       css = append(css, chunkIndexSpec(i, pieceLength))
+               }
+       }
+       if len(css) <= 1 {
+               return
+       }
+       for i := range css {
+               j := rand.Intn(i + 1)
+               css[i], css[j] = css[j], css[i]
+       }
+       return
+}
diff --git a/piecestate.go b/piecestate.go
new file mode 100644 (file)
index 0000000..358ffcf
--- /dev/null
@@ -0,0 +1,18 @@
+package torrent
+
+// The current state of a piece.
+type PieceState struct {
+       Priority piecePriority
+       // The piece is available in its entirety.
+       Complete bool
+       // The piece is being hashed, or is queued for hash.
+       Checking bool
+       // Some of the piece has been obtained.
+       Partial bool
+}
+
+// Represents a series of consecutive pieces with the same state.
+type PieceStateRun struct {
+       PieceState
+       Length int // How many consecutive pieces have this state.
+}
diff --git a/reader.go b/reader.go
new file mode 100644 (file)
index 0000000..12437d5
--- /dev/null
+++ b/reader.go
@@ -0,0 +1,142 @@
+package torrent
+
+import (
+       "errors"
+       "io"
+       "os"
+)
+
+// Accesses torrent data via a client.
+type Reader struct {
+       t          *Torrent
+       pos        int64
+       responsive bool
+       readahead  int64
+}
+
+var _ io.ReadCloser = &Reader{}
+
+// Don't wait for pieces to complete and be verified. Read calls return as
+// soon as they can when the underlying chunks become available.
+func (r *Reader) SetResponsive() {
+       r.responsive = true
+}
+
+// Configure the number of bytes ahead of a read that should also be
+// prioritized in preparation for further reads.
+func (r *Reader) SetReadahead(readahead int64) {
+       r.readahead = readahead
+}
+
+func (r *Reader) raisePriorities(off int64, n int) {
+       if r.responsive {
+               r.t.cl.addUrgentRequests(r.t.torrent, off, n)
+       }
+       r.t.cl.readRaisePiecePriorities(r.t.torrent, off, int64(n)+r.readahead)
+}
+
+func (r *Reader) readable(off int64) (ret bool) {
+       // log.Println("readable", off)
+       // defer func() {
+       //      log.Println("readable", ret)
+       // }()
+       req, ok := r.t.offsetRequest(off)
+       if !ok {
+               panic(off)
+       }
+       if r.responsive {
+               return r.t.haveChunk(req)
+       }
+       return r.t.pieceComplete(int(req.Index))
+}
+
+// How many bytes are available to read. Max is the most we could require.
+func (r *Reader) available(off, max int64) (ret int64) {
+       for max > 0 {
+               req, ok := r.t.offsetRequest(off)
+               if !ok {
+                       break
+               }
+               if !r.t.haveChunk(req) {
+                       break
+               }
+               len1 := int64(req.Length) - (off - r.t.requestOffset(req))
+               max -= len1
+               ret += len1
+               off += len1
+       }
+       // Ensure that ret hasn't exceeded our original max.
+       if max < 0 {
+               ret += max
+       }
+       return
+}
+
+func (r *Reader) waitReadable(off int64) {
+       r.t.Pieces[off/int64(r.t.usualPieceSize())].Event.Wait()
+}
+
+func (r *Reader) ReadAt(b []byte, off int64) (n int, err error) {
+       return r.readAt(b, off)
+}
+
+func (r *Reader) Read(b []byte) (n int, err error) {
+       n, err = r.readAt(b, r.pos)
+       r.pos += int64(n)
+       return
+}
+
+// Must only return EOF at the end of the torrent.
+func (r *Reader) readAt(b []byte, pos int64) (n int, err error) {
+       // defer func() {
+       //      log.Println(pos, n, err)
+       // }()
+       r.t.cl.mu.Lock()
+       defer r.t.cl.mu.Unlock()
+       maxLen := r.t.torrent.Info.TotalLength() - pos
+       if maxLen <= 0 {
+               err = io.EOF
+               return
+       }
+       if int64(len(b)) > maxLen {
+               b = b[:maxLen]
+       }
+again:
+       r.raisePriorities(pos, len(b))
+       for !r.readable(pos) {
+               r.raisePriorities(pos, len(b))
+               r.waitReadable(pos)
+       }
+       avail := r.available(pos, int64(len(b)))
+       // log.Println("available", avail)
+       b1 := b[:avail]
+       n, err = dataReadAt(r.t.data, b1, pos)
+       if n != 0 {
+               err = nil
+               return
+       }
+       if err == io.ErrUnexpectedEOF {
+               goto again
+       }
+       return
+}
+
+func (r *Reader) Close() error {
+       r.t = nil
+       return nil
+}
+
+func (r *Reader) Seek(off int64, whence int) (ret int64, err error) {
+       switch whence {
+       case os.SEEK_SET:
+               r.pos = off
+       case os.SEEK_CUR:
+               r.pos += off
+       case os.SEEK_END:
+               r.pos = r.t.torrent.Info.TotalLength() + off
+       default:
+               err = errors.New("bad whence")
+       }
+       ret = r.pos
+       return
+}
diff --git a/t.go b/t.go
new file mode 100644 (file)
index 0000000..4192586
--- /dev/null
+++ b/t.go
@@ -0,0 +1,54 @@
+package torrent
+
+import (
+       "github.com/anacrolix/torrent/metainfo"
+)
+
+// The public interface for a torrent within a Client.
+
+// A handle to a live torrent within a Client.
+type Torrent struct {
+       cl *Client
+       *torrent
+}
+
+// Closed when the info (.Info()) for the torrent has become available. Using
+// features of Torrent that require the info before it is available will have
+// undefined behaviour.
+func (t *Torrent) GotInfo() <-chan struct{} {
+       return t.torrent.gotMetainfo
+}
+
+func (t *Torrent) Info() *metainfo.Info {
+       return t.torrent.Info
+}
+
+// Returns a Reader bound to the torrent's data. All read calls block until
+// the data requested is actually available. Priorities are set to ensure the
+// data requested will be downloaded as soon as possible.
+func (t *Torrent) NewReader() (ret *Reader) {
+       ret = &Reader{
+               t:         t,
+               readahead: 5 * 1024 * 1024,
+       }
+       return
+}
+
+// Returns the state of pieces of the torrent. They are grouped into runs of
+// same state. The sum of the state run lengths is the number of pieces
+// in the torrent.
+func (t *Torrent) PieceStateRuns() []PieceStateRun {
+       t.stateMu.Lock()
+       defer t.stateMu.Unlock()
+       return t.torrent.pieceStateRuns()
+}
+
+func (t Torrent) NumPieces() int {
+       return t.numPieces()
+}
+
+func (t Torrent) Drop() {
+       t.cl.mu.Lock()
+       t.cl.dropTorrent(t.InfoHash)
+       t.cl.mu.Unlock()
+}
diff --git a/testdata/bootstrap.dat.torrent b/testdata/bootstrap.dat.torrent
new file mode 100644 (file)
index 0000000..e5cdeb7
Binary files /dev/null and b/testdata/bootstrap.dat.torrent differ
index 32fa54d7d27d95c9b6b5e900bd7914287cdf9596..50e2ed91c73f0566c18c48ed111757a3e9f6c861 100644 (file)
@@ -2,21 +2,20 @@ package torrent
 
 import (
        "container/heap"
-       "errors"
        "fmt"
        "io"
        "log"
        "net"
-       "os"
        "sort"
        "sync"
        "time"
 
-       "github.com/anacrolix/libtorgo/bencode"
-       "github.com/anacrolix/libtorgo/metainfo"
+       "github.com/anacrolix/missinggo"
        "github.com/bradfitz/iter"
 
+       "github.com/anacrolix/torrent/bencode"
        "github.com/anacrolix/torrent/data"
+       "github.com/anacrolix/torrent/metainfo"
        pp "github.com/anacrolix/torrent/peer_protocol"
        "github.com/anacrolix/torrent/tracker"
        "github.com/anacrolix/torrent/util"
@@ -27,15 +26,13 @@ func (t *torrent) pieceNumPendingBytes(index int) (count pp.Integer) {
                return 0
        }
        piece := t.Pieces[index]
+       pieceLength := t.pieceLength(index)
        if !piece.EverHashed {
-               return t.pieceLength(index)
-       }
-       pendingChunks := t.Pieces[index].PendingChunkSpecs
-       count = pp.Integer(len(pendingChunks)) * chunkSize
-       _lastChunkSpec := lastChunkSpec(t.pieceLength(index))
-       if _lastChunkSpec.Length != chunkSize {
-               if _, ok := pendingChunks[_lastChunkSpec]; ok {
-                       count += _lastChunkSpec.Length - chunkSize
+               return pieceLength
+       }
+       for i, pending := range piece.PendingChunkSpecs {
+               if pending {
+                       count += chunkIndexSpec(i, pieceLength).Length
                }
        }
        return
@@ -66,13 +63,16 @@ type torrent struct {
 
        InfoHash InfoHash
        Pieces   []*piece
+       // Chunks that are wanted before all others. This is for
+       // responsive/streaming readers that want to unblock ASAP.
+       urgent map[request]struct{}
        // Total length of the torrent in bytes. Stored because it's not O(1) to
        // get this from the info dict.
        length int64
 
        data StatefulData
 
-       // The info dict. Nil if we don't have it.
+       // The info dict. Nil if we don't have it (yet).
        Info *metainfo.Info
        // Active peer connections, running message stream loops.
        Conns []*connection
@@ -99,9 +99,6 @@ type torrent struct {
 
        // Closed when .Info is set.
        gotMetainfo chan struct{}
-       GotMetainfo <-chan struct{}
-
-       pruneTimer *time.Timer
 }
 
 func (t *torrent) pieceComplete(piece int) bool {
@@ -110,91 +107,6 @@ func (t *torrent) pieceComplete(piece int) bool {
        return t.data != nil && t.data.PieceComplete(piece)
 }
 
-// A file-like handle to torrent data that implements SectionOpener. Opened
-// sections will be reused so long as Reads and ReadAt's are contiguous.
-type handle struct {
-       rc     io.ReadCloser
-       rcOff  int64
-       curOff int64
-       so     SectionOpener
-       size   int64
-       t      Torrent
-}
-
-func (h *handle) Close() error {
-       if h.rc != nil {
-               return h.rc.Close()
-       }
-       return nil
-}
-
-func (h *handle) ReadAt(b []byte, off int64) (n int, err error) {
-       return h.readAt(b, off)
-}
-
-func (h *handle) readAt(b []byte, off int64) (n int, err error) {
-       avail := h.t.prepareRead(off)
-       if int64(len(b)) > avail {
-               b = b[:avail]
-       }
-       if int64(len(b)) > h.size-off {
-               b = b[:h.size-off]
-       }
-       if h.rcOff != off && h.rc != nil {
-               h.rc.Close()
-               h.rc = nil
-       }
-       if h.rc == nil {
-               h.rc, err = h.so.OpenSection(off, h.size-off)
-               if err != nil {
-                       return
-               }
-               h.rcOff = off
-       }
-       n, err = h.rc.Read(b)
-       h.rcOff += int64(n)
-       return
-}
-
-func (h *handle) Read(b []byte) (n int, err error) {
-       n, err = h.readAt(b, h.curOff)
-       h.curOff = h.rcOff
-       return
-}
-
-func (h *handle) Seek(off int64, whence int) (newOff int64, err error) {
-       switch whence {
-       case os.SEEK_SET:
-               h.curOff = off
-       case os.SEEK_CUR:
-               h.curOff += off
-       case os.SEEK_END:
-               h.curOff = h.size + off
-       default:
-               err = errors.New("bad whence")
-       }
-       newOff = h.curOff
-       return
-}
-
-// Implements Handle on top of an io.SectionReader.
-type sectionReaderHandle struct {
-       *io.SectionReader
-}
-
-func (sectionReaderHandle) Close() error { return nil }
-
-func (T Torrent) NewReadHandle() Handle {
-       if so, ok := T.data.(SectionOpener); ok {
-               return &handle{
-                       so:   so,
-                       size: T.Length(),
-                       t:    T,
-               }
-       }
-       return sectionReaderHandle{io.NewSectionReader(T, 0, T.Length())}
-}
-
 func (t *torrent) numConnsUnchoked() (num int) {
        for _, c := range t.Conns {
                if !c.PeerChoked {
@@ -217,12 +129,19 @@ func (t *torrent) addrActive(addr string) bool {
        return false
 }
 
-func (t *torrent) worstConnsHeap() (wcs *worstConns) {
+func (t *torrent) worstConns(cl *Client) (wcs *worstConns) {
        wcs = &worstConns{
-               c: append([]*connection{}, t.Conns...),
-               t: t,
+               c:  make([]*connection, 0, len(t.Conns)),
+               t:  t,
+               cl: cl,
+       }
+       for _, c := range t.Conns {
+               select {
+               case <-c.closing:
+               default:
+                       wcs.c = append(wcs.c, c)
+               }
        }
-       heap.Init(wcs)
        return
 }
 
@@ -238,7 +157,6 @@ func (t *torrent) ceaseNetworking() {
        for _, c := range t.Conns {
                c.Close()
        }
-       t.pruneTimer.Stop()
 }
 
 func (t *torrent) addPeer(p Peer) {
@@ -292,6 +210,11 @@ func infoPieceHashes(info *metainfo.Info) (ret []string) {
 
 // Called when metadata for a torrent becomes available.
 func (t *torrent) setMetadata(md *metainfo.Info, infoBytes []byte, eventLocker sync.Locker) (err error) {
+       err = validateInfo(md)
+       if err != nil {
+               err = fmt.Errorf("bad info: %s", err)
+               return
+       }
        t.Info = md
        t.length = 0
        for _, f := range t.Info.UpvertedFiles() {
@@ -374,38 +297,19 @@ func (t *torrent) Name() string {
        return ""
 }
 
-func (t *torrent) pieceStatusChar(index int) byte {
+func (t *torrent) pieceState(index int) (ret PieceState) {
        p := t.Pieces[index]
-       switch {
-       case t.pieceComplete(index):
-               return 'C'
-       case p.QueuedForHash:
-               return 'Q'
-       case p.Hashing:
-               return 'H'
-       case !p.EverHashed:
-               return '?'
-       case t.piecePartiallyDownloaded(index):
-               switch p.Priority {
-               case piecePriorityNone:
-                       return 'F' // Forgotten
-               default:
-                       return 'P'
-               }
-       default:
-               switch p.Priority {
-               case piecePriorityNone:
-                       return 'z'
-               case piecePriorityNow:
-                       return '!'
-               case piecePriorityReadahead:
-                       return 'R'
-               case piecePriorityNext:
-                       return 'N'
-               default:
-                       return '.'
-               }
+       ret.Priority = p.Priority
+       if t.pieceComplete(index) {
+               ret.Complete = true
+       }
+       if p.QueuedForHash || p.Hashing {
+               ret.Checking = true
+       }
+       if t.piecePartiallyDownloaded(index) {
+               ret.Partial = true
        }
+       return
 }
 
 func (t *torrent) metadataPieceSize(piece int) int {
@@ -431,63 +335,65 @@ func (t *torrent) newMetadataExtensionMessage(c *connection, msgType int, piece
        }
 }
 
-type PieceStatusCharSequence struct {
-       Char  byte // The state of this sequence of pieces.
-       Count int  // How many consecutive pieces have this state.
-}
-
-// Returns the state of pieces of the torrent. They are grouped into runs of
-// same state. The sum of the Counts of the sequences is the number of pieces
-// in the torrent. See the function torrent.pieceStatusChar for the possible
-// states.
-func (t *torrent) PieceStatusCharSequences() []PieceStatusCharSequence {
-       t.stateMu.Lock()
-       defer t.stateMu.Unlock()
-       return t.pieceStatusCharSequences()
+func (t *torrent) pieceStateRuns() (ret []PieceStateRun) {
+       rle := missinggo.NewRunLengthEncoder(func(el interface{}, count uint64) {
+               ret = append(ret, PieceStateRun{
+                       PieceState: el.(PieceState),
+                       Length:     int(count),
+               })
+       })
+       for index := range t.Pieces {
+               rle.Append(t.pieceState(index), 1)
+       }
+       rle.Flush()
+       return
 }
 
-// Returns the length of sequences of identical piece status chars.
-func (t *torrent) pieceStatusCharSequences() (ret []PieceStatusCharSequence) {
-       var (
-               char  byte
-               count int
-       )
-       writeSequence := func() {
-               ret = append(ret, PieceStatusCharSequence{char, count})
-       }
-       if len(t.Pieces) != 0 {
-               char = t.pieceStatusChar(0)
-       }
-       for index := range t.Pieces {
-               char1 := t.pieceStatusChar(index)
-               if char1 == char {
-                       count++
-               } else {
-                       writeSequence()
-                       char = char1
-                       count = 1
+// Produces a small string representing a PieceStateRun.
+func pieceStateRunStatusChars(psr PieceStateRun) (ret string) {
+       ret = fmt.Sprintf("%d", psr.Length)
+       ret += func() string {
+               switch psr.Priority {
+               case PiecePriorityNext:
+                       return "N"
+               case PiecePriorityNormal:
+                       return "."
+               case PiecePriorityReadahead:
+                       return "R"
+               case PiecePriorityNow:
+                       return "!"
+               default:
+                       return ""
                }
+       }()
+       if psr.Checking {
+               ret += "H"
        }
-       if count != 0 {
-               writeSequence()
+       if psr.Partial {
+               ret += "P"
+       }
+       if psr.Complete {
+               ret += "C"
        }
        return
 }
 
-func (t *torrent) writeStatus(w io.Writer) {
+func (t *torrent) writeStatus(w io.Writer, cl *Client) {
        fmt.Fprintf(w, "Infohash: %x\n", t.InfoHash)
        fmt.Fprintf(w, "Metadata length: %d\n", t.metadataSize())
-       fmt.Fprintf(w, "Metadata have: ")
-       for _, h := range t.metadataHave {
-               fmt.Fprintf(w, "%c", func() rune {
-                       if h {
-                               return 'H'
-                       } else {
-                               return '.'
-                       }
-               }())
+       if !t.haveInfo() {
+               fmt.Fprintf(w, "Metadata have: ")
+               for _, h := range t.metadataHave {
+                       fmt.Fprintf(w, "%c", func() rune {
+                               if h {
+                                       return 'H'
+                               } else {
+                                       return '.'
+                               }
+                       }())
+               }
+               fmt.Fprintln(w)
        }
-       fmt.Fprintln(w)
        fmt.Fprintf(w, "Piece length: %s\n", func() string {
                if t.haveInfo() {
                        return fmt.Sprint(t.usualPieceSize())
@@ -496,12 +402,18 @@ func (t *torrent) writeStatus(w io.Writer) {
                }
        }())
        if t.haveInfo() {
-               fmt.Fprint(w, "Pieces: ")
-               for _, seq := range t.pieceStatusCharSequences() {
-                       fmt.Fprintf(w, "%d%c ", seq.Count, seq.Char)
+               fmt.Fprint(w, "Pieces:")
+               for _, psr := range t.pieceStateRuns() {
+                       w.Write([]byte(" "))
+                       w.Write([]byte(pieceStateRunStatusChars(psr)))
                }
                fmt.Fprintln(w)
        }
+       fmt.Fprintf(w, "Urgent:")
+       for req := range t.urgent {
+               fmt.Fprintf(w, " %v", req)
+       }
+       fmt.Fprintln(w)
        fmt.Fprintf(w, "Trackers: ")
        for _, tier := range t.Trackers {
                for _, tr := range tier {
@@ -513,10 +425,12 @@ func (t *torrent) writeStatus(w io.Writer) {
        fmt.Fprintf(w, "Half open: %d\n", len(t.HalfOpen))
        fmt.Fprintf(w, "Active peers: %d\n", len(t.Conns))
        sort.Sort(&worstConns{
-               c: t.Conns,
-               t: t,
+               c:  t.Conns,
+               t:  t,
+               cl: cl,
        })
-       for _, c := range t.Conns {
+       for i, c := range t.Conns {
+               fmt.Fprintf(w, "%2d. ", i+1)
                c.WriteStatus(w, t)
        }
 }
@@ -530,7 +444,7 @@ func (t *torrent) String() string {
 }
 
 func (t *torrent) haveInfo() bool {
-       return t.Info != nil
+       return t != nil && t.Info != nil
 }
 
 // TODO: Include URIs that weren't converted to tracker clients.
@@ -574,7 +488,8 @@ func (t *torrent) bytesLeft() (left int64) {
 }
 
 func (t *torrent) piecePartiallyDownloaded(index int) bool {
-       return t.pieceNumPendingBytes(index) != t.pieceLength(index)
+       pendingBytes := t.pieceNumPendingBytes(index)
+       return pendingBytes != 0 && pendingBytes != t.pieceLength(index)
 }
 
 func numChunksForPiece(chunkSize int, pieceSize int) int {
@@ -630,53 +545,49 @@ func (t *torrent) close() (err error) {
        return
 }
 
-// Return the request that would include the given offset into the torrent data.
-func torrentOffsetRequest(torrentLength, pieceSize, chunkSize, offset int64) (
-       r request, ok bool) {
-       if offset < 0 || offset >= torrentLength {
-               return
-       }
-       r.Index = pp.Integer(offset / pieceSize)
-       r.Begin = pp.Integer(offset % pieceSize / chunkSize * chunkSize)
-       left := torrentLength - int64(r.Index)*pieceSize - int64(r.Begin)
-       if chunkSize < left {
-               r.Length = pp.Integer(chunkSize)
-       } else {
-               r.Length = pp.Integer(left)
-       }
-       ok = true
-       return
-}
-
-func torrentRequestOffset(torrentLength, pieceSize int64, r request) (off int64) {
-       off = int64(r.Index)*pieceSize + int64(r.Begin)
-       if off < 0 || off >= torrentLength {
-               panic("invalid request")
-       }
-       return
-}
-
 func (t *torrent) requestOffset(r request) int64 {
        return torrentRequestOffset(t.Length(), int64(t.usualPieceSize()), r)
 }
 
-// Return the request that would include the given offset into the torrent data.
+// Return the request that would include the given offset into the torrent
+// data. Returns !ok if there is no such request.
 func (t *torrent) offsetRequest(off int64) (req request, ok bool) {
        return torrentOffsetRequest(t.Length(), t.Info.PieceLength, chunkSize, off)
 }
 
 func (t *torrent) writeChunk(piece int, begin int64, data []byte) (err error) {
-       _, err = t.data.WriteAt(data, int64(piece)*t.Info.PieceLength+begin)
+       n, err := t.data.WriteAt(data, int64(piece)*t.Info.PieceLength+begin)
+       if err == nil && n != len(data) {
+               err = io.ErrShortWrite
+       }
        return
 }
 
 func (t *torrent) bitfield() (bf []bool) {
        for _, p := range t.Pieces {
-               bf = append(bf, p.EverHashed && len(p.PendingChunkSpecs) == 0)
+               // TODO: Check this logic.
+               bf = append(bf, p.EverHashed && p.numPendingChunks() == 0)
        }
        return
 }
 
+func (t *torrent) validOutgoingRequest(r request) bool {
+       if r.Index >= pp.Integer(t.Info.NumPieces()) {
+               return false
+       }
+       if r.Begin%chunkSize != 0 {
+               return false
+       }
+       if r.Length > chunkSize {
+               return false
+       }
+       pieceLength := t.pieceLength(int(r.Index))
+       if r.Begin+r.Length > pieceLength {
+               return false
+       }
+       return r.Length == chunkSize || r.Begin+r.Length == pieceLength
+}
+
 func (t *torrent) pieceChunks(piece int) (css []chunkSpec) {
        css = make([]chunkSpec, 0, (t.pieceLength(piece)+chunkSize-1)/chunkSize)
        var cs chunkSpec
@@ -691,16 +602,16 @@ func (t *torrent) pieceChunks(piece int) (css []chunkSpec) {
        return
 }
 
-func (t *torrent) pendAllChunkSpecs(index int) {
-       piece := t.Pieces[index]
+func (t *torrent) pendAllChunkSpecs(pieceIndex int) {
+       piece := t.Pieces[pieceIndex]
        if piece.PendingChunkSpecs == nil {
-               piece.PendingChunkSpecs = make(
-                       map[chunkSpec]struct{},
-                       (t.pieceLength(index)+chunkSize-1)/chunkSize)
+               // Allocate to exact size.
+               piece.PendingChunkSpecs = make([]bool, (t.pieceLength(pieceIndex)+chunkSize-1)/chunkSize)
        }
+       // Pend all the chunks.
        pcss := piece.PendingChunkSpecs
-       for _, cs := range t.pieceChunks(int(index)) {
-               pcss[cs] = struct{}{}
+       for i := range pcss {
+               pcss[i] = true
        }
        return
 }
@@ -757,29 +668,58 @@ func (t *torrent) havePiece(index int) bool {
 }
 
 func (t *torrent) haveChunk(r request) bool {
-       p := t.Pieces[r.Index]
-       if !p.EverHashed {
+       if !t.haveInfo() {
                return false
        }
-       _, ok := p.PendingChunkSpecs[r.chunkSpec]
-       return !ok
+       return !t.Pieces[r.Index].pendingChunk(r.chunkSpec)
+}
+
+func chunkIndex(cs chunkSpec) int {
+       return int(cs.Begin / chunkSize)
 }
 
+// TODO: This should probably be called wantPiece.
 func (t *torrent) wantChunk(r request) bool {
        if !t.wantPiece(int(r.Index)) {
                return false
        }
-       _, ok := t.Pieces[r.Index].PendingChunkSpecs[r.chunkSpec]
+       if t.Pieces[r.Index].pendingChunk(r.chunkSpec) {
+               return true
+       }
+       _, ok := t.urgent[r]
        return ok
 }
 
+func (t *torrent) urgentChunkInPiece(piece int) bool {
+       p := pp.Integer(piece)
+       for req := range t.urgent {
+               if req.Index == p {
+                       return true
+               }
+       }
+       return false
+}
+
+// TODO: This should be called wantPieceIndex.
 func (t *torrent) wantPiece(index int) bool {
        if !t.haveInfo() {
                return false
        }
        p := t.Pieces[index]
-       // Put piece complete check last, since it's the slowest!
-       return p.Priority != piecePriorityNone && !p.QueuedForHash && !p.Hashing && !t.pieceComplete(index)
+       if p.QueuedForHash {
+               return false
+       }
+       if p.Hashing {
+               return false
+       }
+       if p.Priority == PiecePriorityNone {
+               if !t.urgentChunkInPiece(index) {
+                       return false
+               }
+       }
+       // Put piece complete check last, since it's the slowest as it can involve
+       // calling out into external data stores.
+       return !t.pieceComplete(index)
 }
 
 func (t *torrent) connHasWantedPieces(c *connection) bool {
@@ -792,3 +732,18 @@ func (t *torrent) extentPieces(off, _len int64) (pieces []int) {
        }
        return
 }
+
+func (t *torrent) worstBadConn(cl *Client) *connection {
+       wcs := t.worstConns(cl)
+       heap.Init(wcs)
+       // A connection can only be bad if it's in the worst half, rounded down.
+       for wcs.Len() > (socketsPerTorrent+1)/2 {
+               c := heap.Pop(wcs).(*connection)
+               // Give connections 1 minute to prove themselves.
+               if time.Since(c.completedHandshake) < time.Minute {
+                       continue
+               }
+               return c
+       }
+       return nil
+}
index fdc96903a0ccf33e44f04f83f0e933482eb59b88..6134e28df26fd177e6ffb98767fb9ad400edd11f 100644 (file)
@@ -3,7 +3,6 @@ package torrent
 import (
        "sync"
        "testing"
-       "time"
 
        "github.com/anacrolix/torrent/peer_protocol"
 )
@@ -46,7 +45,6 @@ func TestTorrentRequest(t *testing.T) {
 
 func TestTorrentDoubleClose(t *testing.T) {
        tt, err := newTorrent(InfoHash{})
-       tt.pruneTimer = time.NewTimer(0)
        if err != nil {
                t.Fatal(err)
        }
index 59e9b9aec6df749e568cf8caace2441a9ded4a58..7d196482db4785f866976d13d879e80316696c27 100644 (file)
@@ -10,8 +10,7 @@ import (
        "net/url"
        "strconv"
 
-       "github.com/anacrolix/libtorgo/bencode"
-
+       "github.com/anacrolix/torrent/bencode"
        "github.com/anacrolix/torrent/util"
 )
 
index 4c795c5279f60e51a7d525d2d391311ab8690d0c..7021e776d6988978735ee07c62259231a6227fdf 100644 (file)
@@ -10,6 +10,8 @@ import (
        "net"
        "net/url"
        "time"
+
+       "github.com/anacrolix/torrent/util"
 )
 
 type Action int32
@@ -120,7 +122,7 @@ func (c *udpClient) Announce(req *AnnounceRequest) (res AnnounceResponse, err er
        res.Leechers = h.Leechers
        res.Seeders = h.Seeders
        for {
-               var p Peer
+               var p util.CompactPeer
                err = binary.Read(b, binary.BigEndian, &p)
                switch err {
                case nil:
index 9669e8343c98ff4aab78ac6c579a70916caef695..ed65c80fefda60fff04cf91613db3e237db08a84 100644 (file)
@@ -9,10 +9,10 @@ import (
        "path/filepath"
        "strings"
 
-       "github.com/anacrolix/libtorgo/metainfo"
        "github.com/go-fsnotify/fsnotify"
 
        "github.com/anacrolix/torrent"
+       "github.com/anacrolix/torrent/metainfo"
        "github.com/anacrolix/torrent/util"
 )
 
index cbf2872b7c48deaf14b4dc263f28ddd2ef650458..1ffff796c667dbbe405bc8c077a900b6188caa13 100644 (file)
@@ -7,7 +7,7 @@ import (
        "fmt"
        "io"
 
-       "github.com/anacrolix/libtorgo/bencode"
+       "github.com/anacrolix/torrent/bencode"
 )
 
 type CompactPeers []CompactPeer
index 69f67e93776a19e9946fd3a91f4eb65d1364b28b..0f7aa3aa7842b2c5190b259067b8b08fba7f7716 100644 (file)
@@ -6,12 +6,13 @@ import (
 
 // Implements heap functions such that [0] is the worst connection.
 type worstConns struct {
-       c []*connection
-       t *torrent
+       c  []*connection
+       t  *torrent
+       cl *Client
 }
 
-func (me worstConns) Len() int      { return len(me.c) }
-func (me worstConns) Swap(i, j int) { me.c[i], me.c[j] = me.c[j], me.c[i] }
+func (me *worstConns) Len() int      { return len(me.c) }
+func (me *worstConns) Swap(i, j int) { me.c[i], me.c[j] = me.c[j], me.c[i] }
 
 func (me *worstConns) Pop() (ret interface{}) {
        old := me.c
@@ -26,37 +27,33 @@ func (me *worstConns) Push(x interface{}) {
 }
 
 type worstConnsSortKey struct {
-       // Peer has something we want.
-       useless bool
-       // A fabricated duration since peer was last helpful.
-       age time.Duration
+       useful      bool
+       lastHelpful time.Time
+       connected   time.Time
 }
 
 func (me worstConnsSortKey) Less(other worstConnsSortKey) bool {
-       if me.useless != other.useless {
-               return me.useless
+       if me.useful != other.useful {
+               return !me.useful
        }
-       return me.age > other.age
+       if !me.lastHelpful.Equal(other.lastHelpful) {
+               return me.lastHelpful.Before(other.lastHelpful)
+       }
+       return me.connected.Before(other.connected)
 }
 
-func (me worstConns) key(i int) (key worstConnsSortKey) {
+func (me *worstConns) key(i int) (key worstConnsSortKey) {
        c := me.c[i]
-       // Peer has had time to declare what they have.
-       if time.Now().Sub(c.completedHandshake) >= 30*time.Second {
-               if !me.t.haveInfo() {
-                       key.useless = !c.supportsExtension("ut_metadata")
-               } else {
-                       if !me.t.connHasWantedPieces(c) {
-                               key.useless = true
-                       }
-               }
+       key.useful = me.cl.usefulConn(me.t, c)
+       if me.cl.seeding(me.t) {
+               key.lastHelpful = c.lastChunkSent
+       }
+       // Intentionally consider the last time a chunk was received when seeding,
+       // because we might go from seeding back to leeching.
+       if c.lastUsefulChunkReceived.After(key.lastHelpful) {
+               key.lastHelpful = c.lastUsefulChunkReceived
        }
-       key.age = time.Duration(1+3*c.UnwantedChunksReceived) * time.Now().Sub(func() time.Time {
-               if !c.lastUsefulChunkReceived.IsZero() {
-                       return c.lastUsefulChunkReceived
-               }
-               return c.completedHandshake.Add(-time.Minute)
-       }()) / time.Duration(1+c.UsefulChunksReceived)
+       key.connected = c.completedHandshake
        return
 }