12 "github.com/RoaringBitmap/roaring"
13 "github.com/anacrolix/torrent/common"
14 "github.com/anacrolix/torrent/metainfo"
15 "github.com/anacrolix/torrent/segments"
18 type RequestSpec = segments.Extent
// requestPartResult carries the outcome of a single HTTP request, one per
// file extent that a RequestSpec spans.
type requestPartResult struct {
	resp *http.Response
	err  error
}

type requestPart struct {
	req    *http.Request
	e      segments.Extent
	result chan requestPartResult
}

// Request is a handle for an in-flight webseed request.
type Request struct {
	cancel context.CancelFunc
	Result chan RequestResult
}

// Cancel aborts all in-flight HTTP requests backing this Request.
func (r Request) Cancel() {
	r.cancel()
}

// Client requests torrent data from a single webseed URL.
type Client struct {
	HttpClient *http.Client
	Url        string
	fileIndex  segments.Index
	info       *metainfo.Info
	// The pieces we can request with the Url. We're more likely to ban/block at the file level,
	// given that's how requests are mapped to webseeds, but the torrent.Client works at the
	// piece level. We can map our file-level adjustments to the pieces here. This probably needs
	// to be private in the future, if Client ever starts removing pieces.
	Pieces roaring.Bitmap
}

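// A minimal sketch of the file-to-piece mapping described above: if a file at
// byte offset off with length n proves unservable, the pieces overlapping it
// could be cleared from the bitmap. clearPiecesForFile is a hypothetical
// helper, not part of this package; the real adjustment logic would live in
// whatever does the banning.
func clearPiecesForFile(pieces *roaring.Bitmap, off, n, pieceLength int64) {
	begin := off / pieceLength                       // first piece overlapping the file
	end := (off + n + pieceLength - 1) / pieceLength // one past the last overlapping piece
	pieces.RemoveRange(uint64(begin), uint64(end))
}
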
// SetInfo supplies the torrent info, enabling the client to map requests to
// files and pieces.
func (me *Client) SetInfo(info *metainfo.Info) {
	if !strings.HasSuffix(me.Url, "/") && info.IsDir() {
		// In my experience, this is a non-conforming webseed. For example the
		// http://ia600500.us.archive.org/1/items URLs in archive.org torrents.
		return
	}
	me.fileIndex = segments.NewIndex(common.LengthIterFromUpvertedFiles(info.UpvertedFiles()))
	me.info = info
	me.Pieces.AddRange(0, uint64(info.NumPieces()))
}

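// For context on the trailing-slash check above, a hedged sketch of the
// BEP 19 URL construction for multi-file torrents: the torrent name and file
// path components are appended to the base URL, so a base without a trailing
// "/" can't address files in a directory torrent. exampleFileUrl is
// hypothetical and ignores URL escaping; the real construction is done by the
// package-level NewRequest.
func exampleFileUrl(base string, info *metainfo.Info, fileComponents []string) string {
	return base + info.Name + "/" + strings.Join(fileComponents, "/")
}
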
// RequestResult is the terminal outcome of a Request: the requested bytes, or
// an error.
type RequestResult struct {
	Bytes []byte
	Err   error
}

// NewRequest issues one HTTP request per file extent spanned by r, and
// returns a handle whose Result channel yields the reassembled bytes.
func (ws *Client) NewRequest(r RequestSpec) Request {
	ctx, cancel := context.WithCancel(context.Background())
	var requestParts []requestPart
	if !ws.fileIndex.Locate(r, func(i int, e segments.Extent) bool {
		req, err := NewRequest(ws.Url, i, ws.info, e.Start, e.Length)
		if err != nil {
			panic(err)
		}
		req = req.WithContext(ctx)
		part := requestPart{
			req:    req,
			e:      e,
			result: make(chan requestPartResult, 1),
		}
		go func() {
			resp, err := ws.HttpClient.Do(req)
			part.result <- requestPartResult{resp: resp, err: err}
		}()
		requestParts = append(requestParts, part)
		return true
	}) {
		panic("request out of file bounds")
	}
	req := Request{
		cancel: cancel,
		Result: make(chan RequestResult, 1),
	}
	go func() {
		b, err := readRequestPartResponses(ctx, requestParts)
		req.Result <- RequestResult{Bytes: b, Err: err}
	}()
	return req
}

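// A hedged usage sketch: how a caller might fetch an extent and block on the
// result. The URL and extent are illustrative only; real callers would reuse
// the Client and handle cancellation.
func exampleFetch(info *metainfo.Info, extent RequestSpec) ([]byte, error) {
	c := Client{HttpClient: http.DefaultClient, Url: "https://example.com/seed/"}
	c.SetInfo(info)
	req := c.NewRequest(extent)
	res := <-req.Result
	return res.Bytes, res.Err
}
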
// ErrBadResponse is returned when a webseed responds with an HTTP status or
// body that can't satisfy the requested range.
type ErrBadResponse struct {
	Msg      string
	Response *http.Response
}

func (me ErrBadResponse) Error() string {
	return me.Msg
}

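// A small sketch of how a consumer might special-case webseed errors after
// they've been wrapped by readRequestPartResponses. isRangeIgnored is a
// hypothetical helper; it detects the 200-instead-of-206 case handled below.
func isRangeIgnored(err error) bool {
	var bad ErrBadResponse
	return errors.As(err, &bad) && bad.Response.StatusCode == http.StatusOK
}
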
func recvPartResult(ctx context.Context, buf io.Writer, part requestPart) error {
	result := <-part.result
	// Make sure no further results are coming: it should be a one-shot channel.
	close(part.result)
	if result.err != nil {
		return result.err
	}
	defer result.resp.Body.Close()
	if ctx.Err() != nil {
		return ctx.Err()
	}
	switch result.resp.StatusCode {
	case http.StatusPartialContent:
		copied, err := io.Copy(buf, result.resp.Body)
		if err != nil {
			return err
		}
		if copied != part.e.Length {
			return fmt.Errorf("got %v bytes, expected %v", copied, part.e.Length)
		}
		return nil
	case http.StatusOK:
		// This number is based on
		// https://archive.org/download/BloodyPitOfHorror/BloodyPitOfHorror.asr.srt. It seems
		// that archive.org might be using a webserver implementation that refuses to do
		// partial responses to small files.
		if part.e.Start < 48<<10 {
			if part.e.Start != 0 {
				log.Printf("resp status ok but requested range [url=%q, range=%q]",
					part.req.URL,
					part.req.Header.Get("Range"))
			}
			// Instead of discarding, we could try receiving all the chunks present in the
			// response body. I don't know how one would handle multiple chunk requests
			// resulting in an OK response for the same file. The request algorithm might
			// need to be smarter for that.
			discarded, _ := io.CopyN(io.Discard, result.resp.Body, part.e.Start)
			if discarded != 0 {
				log.Printf("discarded %v bytes in webseed request response part", discarded)
			}
			_, err := io.CopyN(buf, result.resp.Body, part.e.Length)
			return err
		}
		return ErrBadResponse{"resp status ok but requested range", result.resp}
	default:
		return ErrBadResponse{
			fmt.Sprintf("unhandled response status code (%v)", result.resp.StatusCode),
			result.resp,
		}
	}
}

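// For reference, a sketch of the Range header this code expects servers to
// honor (RFC 7233 byte ranges): an extent {Start: s, Length: n} maps to an
// inclusive byte range. exampleRangeHeader is illustrative; the real header
// is set by the package-level NewRequest and may differ in detail.
func exampleRangeHeader(e segments.Extent) string {
	return fmt.Sprintf("bytes=%d-%d", e.Start, e.Start+e.Length-1)
}
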
// readRequestPartResponses assembles the part responses in order into one
// buffer, stopping at the first error.
func readRequestPartResponses(ctx context.Context, parts []requestPart) (_ []byte, err error) {
	var buf bytes.Buffer
	for _, part := range parts {
		err = recvPartResult(ctx, &buf, part)
		if err != nil {
			err = fmt.Errorf("reading %q at %q: %w", part.req.URL, part.req.Header.Get("Range"), err)
			break
		}
	}
	return buf.Bytes(), err
}