// peerId identifies a Peer by its pointer value, so it can be used as a
// stable, comparable key (e.g. for gob round-trips via GobEncode/GobDecode
// below). The embedded *Peer and ptr are expected to refer to the same
// object — ptr is the uintptr form of the Peer pointer.
type peerId struct {
	*Peer
	// ptr is the Peer pointer reinterpreted as an integer; exposed via
	// Uintptr and serialized verbatim by GobEncode.
	ptr uintptr
}
+
// Uintptr returns the peer's pointer identity as an integer.
func (p peerId) Uintptr() uintptr {
	return p.ptr
}
+
+func (p peerId) GobEncode() (b []byte, _ error) {
+ *(*reflect.SliceHeader)(unsafe.Pointer(&b)) = reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(&p.ptr)),
+ Len: int(unsafe.Sizeof(p.ptr)),
+ Cap: int(unsafe.Sizeof(p.ptr)),
+ }
+ return
+}
+
+func (p *peerId) GobDecode(b []byte) error {
+ if uintptr(len(b)) != unsafe.Sizeof(p.ptr) {
+ panic(len(b))
+ }
+ ptr := unsafe.Pointer(&b[0])
+ p.ptr = *(*uintptr)(ptr)
+ log.Printf("%p", ptr)
+ dst := reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(&p.Peer)),
+ Len: int(unsafe.Sizeof(p.Peer)),
+ Cap: int(unsafe.Sizeof(p.Peer)),
+ }
+ copy(*(*[]byte)(unsafe.Pointer(&dst)), b)
+ return nil
+}
+
type (
	// RequestIndex aliases the request-strategy package's request index,
	// identifying a single chunk request.
	RequestIndex = requestStrategy.RequestIndex
	// chunkIndexType aliases the request-strategy package's chunk index.
	chunkIndexType = requestStrategy.ChunkIndex
)
+
// desiredPeerRequests holds the candidate request indexes we would like to
// issue to a single peer, ordered by desirability via the Len/Less methods
// below (sort-style interface over requestIndexes).
type desiredPeerRequests struct {
	// requestIndexes are the candidate requests, sorted in place.
	requestIndexes []RequestIndex
	// peer is the peer the requests would be sent to; its choke/allowed-fast
	// state feeds into the ordering.
	peer *Peer
	// pieceStates caches per-piece request-order state, indexed by piece
	// index, so comparisons don't re-resolve it per request.
	pieceStates []requestStrategy.PieceRequestOrderState
}
+
// Len reports the number of candidate request indexes (sort.Interface).
func (p *desiredPeerRequests) Len() int {
	return len(p.requestIndexes)
}
+
+func (p *desiredPeerRequests) Less(i, j int) bool {
+ return p.lessByValue(p.requestIndexes[i], p.requestIndexes[j])
+}
+
+func (p *desiredPeerRequests) lessByValue(leftRequest, rightRequest RequestIndex) bool {
+ t := p.peer.t
+ leftPieceIndex := t.pieceIndexOfRequestIndex(leftRequest)
+ rightPieceIndex := t.pieceIndexOfRequestIndex(rightRequest)
+ ml := multiless.New()
+ // Push requests that can't be served right now to the end. But we don't throw them away unless
+ // there's a better alternative. This is for when we're using the fast extension and get choked
+ // but our requests could still be good when we get unchoked.
+ if p.peer.peerChoking {
+ ml = ml.Bool(
+ !p.peer.peerAllowedFast.Contains(leftPieceIndex),
+ !p.peer.peerAllowedFast.Contains(rightPieceIndex),
+ )
+ }
+ leftPiece := &p.pieceStates[leftPieceIndex]
+ rightPiece := &p.pieceStates[rightPieceIndex]
+ // Putting this first means we can steal requests from lesser-performing peers for our first few
+ // new requests.
+ priority := func() piecePriority {
+ // Technically we would be happy with the cached priority here, except we don't actually
+ // cache it anymore, and Torrent.piecePriority just does another lookup of *Piece to resolve
+ // the priority through Piece.purePriority, which is probably slower.
+ leftPriority := leftPiece.Priority
+ rightPriority := rightPiece.Priority
+ ml = ml.Int(
+ -int(leftPriority),
+ -int(rightPriority),
+ )
+ if !ml.Ok() {
+ if leftPriority != rightPriority {
+ panic("expected equal")
+ }