11 "github.com/RoaringBitmap/roaring"
12 "github.com/anacrolix/log"
14 "github.com/anacrolix/torrent/metainfo"
15 pp "github.com/anacrolix/torrent/peer_protocol"
16 "github.com/anacrolix/torrent/webseed"
// How long to back off after an error we don't specifically handle, before retrying.
20 webseedPeerUnhandledErrorSleep = 5 * time.Second
// If true, close the webseed peer on an unhandled error instead of sleeping and retrying.
21 webseedPeerCloseOnUnhandledError = false
// webseedPeer adapts an HTTP webseed source to the peer interface used by the
// rest of the client (see the peerImpl assertion below).
24 type webseedPeer struct {
25 // First field for stats alignment.
// Requests currently in flight against the webseed, keyed by the torrent-level
// request they correspond to.
28 activeRequests map[Request]webseed.Request
// Signalled when new requests may be available for the requester goroutines to pick up.
// NOTE(review): the Locker it wraps is assigned in elided init code — presumably the
// client lock; confirm before relying on that.
29 requesterCond sync.Cond
// When the last unhandled request error occurred; used to pace retries
// (see webseedPeerUnhandledErrorSleep).
30 lastUnhandledErr time.Time
// Compile-time check that *webseedPeer satisfies the peerImpl interface.
33 var _ peerImpl = (*webseedPeer)(nil)
// peerImplStatusLines returns human-readable status lines for this peer,
// including how long ago the last unhandled error occurred.
35 func (me *webseedPeer) peerImplStatusLines() []string {
38 fmt.Sprintf("last unhandled error: %v", eventAgeString(me.lastUnhandledErr)),
// String implements fmt.Stringer, identifying the peer by its webseed URL.
42 func (ws *webseedPeer) String() string {
43 return fmt.Sprintf("webseed peer for %q", ws.client.Url)
// onGotInfo propagates the torrent's metainfo to the webseed client and bumps
// piece availability for every piece the webseed claims to have.
46 func (ws *webseedPeer) onGotInfo(info *metainfo.Info) {
47 ws.client.SetInfo(info)
48 // There should be probably be a callback in Client instead, so it can remove pieces at its whim
50 ws.client.Pieces.Iterate(func(x uint32) bool {
51 ws.peer.t.incPieceAvailability(pieceIndex(x))
// writeInterested is a no-op for webseeds: HTTP sources have no interest
// handshake. Body elided here — presumably it just reports success; confirm.
56 func (ws *webseedPeer) writeInterested(interested bool) bool {
// _cancel cancels the active webseed request for r, if any. The return value
// distinguishes whether a requester goroutine will observe and handle the
// cancellation (true) or no further events will occur for this request.
60 func (ws *webseedPeer) _cancel(r RequestIndex) bool {
61 if active, ok := ws.activeRequests[ws.peer.t.requestIndexToRequest(r)]; ok {
63 // The requester is running and will handle the result.
66 // There should be no requester handling this, so no further events will occur.
// intoSpec converts a torrent-level Request into a webseed RequestSpec
// (absolute byte offset within the torrent, plus length).
70 func (ws *webseedPeer) intoSpec(r Request) webseed.RequestSpec {
71 return webseed.RequestSpec{ws.peer.t.requestOffset(r), int64(r.Length)}
// _request wakes one requester goroutine to pick up newly available work.
// The request itself is pulled from the peer's request state by the requester.
74 func (ws *webseedPeer) _request(r Request) bool {
75 ws.requesterCond.Signal()
// doRequest issues the webseed HTTP request for r, records it in
// activeRequests, and blocks on the result. The requesterCond lock is released
// while waiting so other requesters and cancellations can proceed, and is
// re-taken before the activeRequests cleanup (cleanup lines partly elided).
79 func (ws *webseedPeer) doRequest(r Request) error {
80 webseedRequest := ws.client.NewRequest(ws.intoSpec(r))
81 ws.activeRequests[r] = webseedRequest
83 ws.requesterCond.L.Unlock()
84 defer ws.requesterCond.L.Lock()
85 return ws.requestResultHandler(r, webseedRequest)
87 delete(ws.activeRequests, r)
// requester is a worker goroutine (index i is for logging only). It loops
// until the peer closes: scanning the peer's outstanding requests, starting a
// webseed request for any not already active, backing off on errors, and
// waiting on requesterCond when there is nothing to do. It runs with
// requesterCond's lock held except while a request is in flight.
91 func (ws *webseedPeer) requester(i int) {
92 ws.requesterCond.L.Lock()
93 defer ws.requesterCond.L.Unlock()
95 for !ws.peer.closed.IsSet() {
96 // Restart is set if we don't need to wait for the requestCond before trying again.
98 ws.peer.requestState.Requests.Iterate(func(x RequestIndex) bool {
99 r := ws.peer.t.requestIndexToRequest(x)
// Skip requests that already have a webseed request in flight.
100 if _, ok := ws.activeRequests[r]; ok {
103 err := ws.doRequest(r)
104 ws.requesterCond.L.Unlock()
105 if err != nil && !errors.Is(err, context.Canceled) {
106 log.Printf("requester %v: error doing webseed request %v: %v", i, r, err)
// Rate-limited by the server: sleep a random interval (up to 10s) to spread
// out retries across requester goroutines.
109 if errors.Is(err, webseed.ErrTooFast) {
110 time.Sleep(time.Duration(rand.Int63n(int64(10 * time.Second))))
112 // Demeter is throwing a tantrum on Mount Olympus for this
// Read lastUnhandledErr under the client lock to compute the remaining
// unhandled-error backoff, then sleep it off (sleep line elided).
113 ws.peer.t.cl.locker().RLock()
114 duration := time.Until(ws.lastUnhandledErr.Add(webseedPeerUnhandledErrorSleep))
115 ws.peer.t.cl.locker().RUnlock()
117 ws.requesterCond.L.Lock()
// Nothing to do: block until _request/onClose signals us.
123 ws.requesterCond.Wait()
// connectionFlags reports the flags string shown for this connection type
// (body elided in this view).
127 func (ws *webseedPeer) connectionFlags() string {
// drop is a no-op for webseeds: there is no single connection to drop.
131 // Maybe this should drop all existing connections, or something like that.
132 func (ws *webseedPeer) drop() {}
// ban handles banning this webseed source (body elided in this view).
134 func (cn *webseedPeer) ban() {
// handleUpdateRequests recomputes this peer's desired request state under the
// client lock (the lock acquisition line is elided; the deferred unlock is
// visible below).
138 func (ws *webseedPeer) handleUpdateRequests() {
139 // Because this is synchronous, webseed peers seem to get first dibs on newly prioritized
143 defer ws.peer.t.cl.unlock()
144 ws.peer.maybeUpdateActualRequestState()
// onClose tears the webseed peer down: cancels all outstanding requests, gives
// other peers a chance to pick up the now-unassigned work, and wakes every
// requester goroutine so they can observe the closed state and exit.
148 func (ws *webseedPeer) onClose() {
149 ws.peer.logger.Levelf(log.Debug, "closing")
150 // Just deleting them means we would have to manually cancel active requests.
151 ws.peer.cancelAllRequests()
152 ws.peer.t.iterPeers(func(p *Peer) {
153 if p.isLowOnRequests() {
154 p.updateRequests("webseedPeer.onClose")
// Wake all requesters so they notice ws.peer.closed and return.
157 ws.requesterCond.Broadcast()
// requestResultHandler consumes the one-shot result of a webseed request:
// on success (or partial data) it accounts the bytes and feeds the chunk into
// the normal receive path; on failure it classifies the error, pacing or
// closing per the webseedPeer* constants, and rejects the request so it can be
// reassigned. Several branches are elided in this view.
160 func (ws *webseedPeer) requestResultHandler(r Request, webseedRequest webseed.Request) error {
161 result := <-webseedRequest.Result
162 close(webseedRequest.Result) // one-shot
163 // We do this here rather than inside receiveChunk, since we want to count errors too. I'm not
164 // sure if we can divine which errors indicate cancellation on our end without hitting the
// Count reads even when an error accompanied partial data.
166 if len(result.Bytes) != 0 || result.Err == nil {
167 // Increment ChunksRead and friends
168 ws.peer.doChunkReadStats(int64(len(result.Bytes)))
170 ws.peer.readBytes(int64(len(result.Bytes)))
// Client lock held from here (acquisition line elided); bail out early if the
// torrent is already closed.
172 defer ws.peer.t.cl.unlock()
173 if ws.peer.t.closed.IsSet() {
// Error classification: cancellation, server throttling, and local close are
// expected; anything else is an "unhandled" error.
179 case errors.Is(err, context.Canceled):
180 case errors.Is(err, webseed.ErrTooFast):
181 case ws.peer.closed.IsSet():
183 ws.peer.logger.Printf("Request %v rejected: %v", r, result.Err)
184 // // Here lies my attempt to extract something concrete from Go's error system. RIP.
185 // cfg := spew.NewDefaultConfig()
186 // cfg.DisableMethods = true
187 // cfg.Dump(result.Err)
189 if webseedPeerCloseOnUnhandledError {
190 log.Printf("closing %v", ws)
// Record the error time so requester goroutines back off (see
// webseedPeerUnhandledErrorSleep).
193 ws.lastUnhandledErr = time.Now()
// Treat the failure as a remote reject so the request can be reassigned; a
// false return means our request bookkeeping is inconsistent.
196 if !ws.peer.remoteRejectedRequest(ws.peer.t.requestIndexFromRequest(r)) {
197 panic("invalid reject")
// Success path: deliver the data as a synthesized piece message.
201 err = ws.peer.receiveChunk(&pp.Message{
// peerPieces returns the bitmap of pieces the webseed advertises.
213 func (me *webseedPeer) peerPieces() *roaring.Bitmap {
214 return &me.client.Pieces
// peerHasAllPieces reports whether the webseed has every piece. Before the
// torrent info is available (branch body elided) the answer cannot be known;
// afterwards it is known exactly by comparing piece-set cardinality.
217 func (cn *webseedPeer) peerHasAllPieces() (all, known bool) {
218 if !cn.peer.t.haveInfo() {
221 return cn.client.Pieces.GetCardinality() == uint64(cn.peer.t.numPieces()), true