12 "github.com/anacrolix/missinggo/v2/resource"
14 "github.com/anacrolix/torrent/metainfo"
type piecePerResource struct {
	rp   PieceProvider
	opts ResourcePiecesOpts
}

type ResourcePiecesOpts struct {
	// After marking a piece complete, don't bother deleting its incomplete blobs.
	LeaveIncompleteChunks bool
	// Sized puts require being able to stream from a statement executed on another connection.
	// Without them, we buffer the entire read and then put that.
	NoSizedPuts bool
}

func NewResourcePieces(p PieceProvider) ClientImpl {
	return NewResourcePiecesOpts(p, ResourcePiecesOpts{})
}

func NewResourcePiecesOpts(p PieceProvider, opts ResourcePiecesOpts) ClientImpl {
	return &piecePerResource{
		rp:   p,
		opts: opts,
	}
}

type piecePerResourceTorrentImpl struct {
	piecePerResource
	locks []sync.RWMutex
}

func (piecePerResourceTorrentImpl) Close() error {
	return nil
}

func (s piecePerResource) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (TorrentImpl, error) {
	t := piecePerResourceTorrentImpl{
		s,
		make([]sync.RWMutex, info.NumPieces()),
	}
	return TorrentImpl{Piece: t.Piece, Close: t.Close}, nil
}

func (s piecePerResourceTorrentImpl) Piece(p metainfo.Piece) PieceImpl {
	return piecePerResourcePiece{
		mp:               p,
		piecePerResource: s.piecePerResource,
		mu:               &s.locks[p.Index()],
	}
}

type PieceProvider interface {
	resource.Provider
}

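// ConsecutiveChunkReader is an optional interface a PieceProvider can implement. When present,
// WriteTo and MarkComplete ask the provider to stream every incomplete chunk stored under a prefix
// in a single call, instead of reading the chunks back one instance at a time via getChunks.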
type ConsecutiveChunkReader interface {
	ReadConsecutiveChunks(prefix string) (io.ReadCloser, error)
}

type piecePerResourcePiece struct {
	mp metainfo.Piece
	piecePerResource
	// This protects operations that move complete/incomplete pieces around, which can trigger read
	// errors that may cause callers to do more drastic things.
	mu *sync.RWMutex
}

var _ io.WriterTo = piecePerResourcePiece{}

func (s piecePerResourcePiece) WriteTo(w io.Writer) (int64, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	if s.mustIsComplete() {
		r, err := s.completed().Get()
		if err != nil {
			return 0, fmt.Errorf("getting complete instance: %w", err)
		}
		defer r.Close()
		return io.Copy(w, r)
	}
	if ccr, ok := s.rp.(ConsecutiveChunkReader); ok {
		return s.writeConsecutiveIncompleteChunks(ccr, w)
	}
	return io.Copy(w, io.NewSectionReader(s, 0, s.mp.Length()))
}

func (s piecePerResourcePiece) writeConsecutiveIncompleteChunks(ccw ConsecutiveChunkReader, w io.Writer) (int64, error) {
	r, err := ccw.ReadConsecutiveChunks(s.incompleteDirPath() + "/")
	if err != nil {
		return 0, err
	}
	defer r.Close()
	return io.Copy(w, r)
}

// Reports whether the piece is complete. Ok should be true, because we are the definitive source
// of truth here.
func (s piecePerResourcePiece) mustIsComplete() bool {
	completion := s.Completion()
	if !completion.Ok {
		panic("must know complete definitively")
	}
	return completion.Complete
}

func (s piecePerResourcePiece) Completion() Completion {
	s.mu.RLock()
	defer s.mu.RUnlock()
	fi, err := s.completed().Stat()
	return Completion{
		Complete: err == nil && fi.Size() == s.mp.Length(),
		Ok:       true,
	}
}

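// SizedPutter is another optional interface. When the completed-piece instance implements it,
// MarkComplete (subject to the NoSizedPuts option) and WriteAt pass the known length along with
// the reader, letting the provider stream the data instead of buffering it first.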
type SizedPutter interface {
	PutSized(io.Reader, int64) error
}

func (s piecePerResourcePiece) MarkComplete() error {
	s.mu.Lock()
	defer s.mu.Unlock()
	incompleteChunks := s.getChunks()
	r, err := func() (io.ReadCloser, error) {
		if ccr, ok := s.rp.(ConsecutiveChunkReader); ok {
			return ccr.ReadConsecutiveChunks(s.incompleteDirPath() + "/")
		}
		return io.NopCloser(io.NewSectionReader(incompleteChunks, 0, s.mp.Length())), nil
	}()
	if err != nil {
		return fmt.Errorf("getting incomplete chunks reader: %w", err)
	}
	defer r.Close()
	completedInstance := s.completed()
	err = func() error {
		if sp, ok := completedInstance.(SizedPutter); ok && !s.opts.NoSizedPuts {
			return sp.PutSized(r, s.mp.Length())
		} else {
			return completedInstance.Put(r)
		}
	}()
	if err == nil && !s.opts.LeaveIncompleteChunks {
		// I think we do this synchronously here since we don't want callers to act on the completed
		// piece if we're concurrently still deleting chunks. The caller may decide to start
		// downloading chunks again and won't expect us to delete them. It seems to be much faster
		// to let the resource provider do this if possible.
		var wg sync.WaitGroup
		for _, c := range incompleteChunks {
			wg.Add(1)
			go func(c chunk) {
				defer wg.Done()
				c.instance.Delete()
			}(c)
		}
		wg.Wait()
	}
	return err
}

func (s piecePerResourcePiece) MarkNotComplete() error {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.completed().Delete()
}

func (s piecePerResourcePiece) ReadAt(b []byte, off int64) (int, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	if s.mustIsComplete() {
		return s.completed().ReadAt(b, off)
	}
	return s.getChunks().ReadAt(b, off)
}

func (s piecePerResourcePiece) WriteAt(b []byte, off int64) (n int, err error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	i, err := s.rp.NewInstance(path.Join(s.incompleteDirPath(), strconv.FormatInt(off, 10)))
	if err != nil {
		panic(err)
	}
	r := bytes.NewReader(b)
	if sp, ok := i.(SizedPutter); ok {
		err = sp.PutSized(r, r.Size())
	} else {
		err = i.Put(r)
	}
	n = len(b) - r.Len()
	return
}

type chunk struct {
	offset   int64
	instance resource.Instance
}

type chunks []chunk

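// ReadAt assembles a read from the incomplete chunk instances: it reads what it can from the first
// chunk that starts at or before off, recurses into the remaining chunks for any bytes still
// missing, and reports io.EOF once no suitable chunk is left.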
func (me chunks) ReadAt(b []byte, off int64) (int, error) {
	for {
		if len(me) == 0 {
			return 0, io.EOF
		}
		if me[0].offset <= off {
			break
		}
		me = me[1:]
	}
	n, err := me[0].instance.ReadAt(b, off-me[0].offset)
	if n == len(b) {
		return n, nil
	}
	if err == nil || err == io.EOF {
		n_, err := me[1:].ReadAt(b[n:], off+int64(n))
		return n + n_, err
	}
	return n, err
}

func (s piecePerResourcePiece) getChunks() (chunks chunks) {
	names, err := s.incompleteDir().Readdirnames()
	if err != nil {
		return
	}
	for _, n := range names {
		offset, err := strconv.ParseInt(n, 10, 64)
		if err != nil {
			panic(err)
		}
		i, err := s.rp.NewInstance(path.Join(s.incompleteDirPath(), n))
		if err != nil {
			panic(err)
		}
		chunks = append(chunks, chunk{offset, i})
	}
	sort.Slice(chunks, func(i, j int) bool {
		return chunks[i].offset < chunks[j].offset
	})
	return
}

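// Pieces live under two resource prefixes keyed by the piece hash: "completed/<hex hash>" holds a
// finished piece as a single blob, while "incompleted/<hex hash>/<offset>" holds each in-progress
// chunk as its own instance, named by its byte offset within the piece.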
func (s piecePerResourcePiece) completedInstancePath() string {
	return path.Join("completed", s.mp.Hash().HexString())
}

func (s piecePerResourcePiece) completed() resource.Instance {
	i, err := s.rp.NewInstance(s.completedInstancePath())
	if err != nil {
		panic(err)
	}
	return i
}

func (s piecePerResourcePiece) incompleteDirPath() string {
	return path.Join("incompleted", s.mp.Hash().HexString())
}

func (s piecePerResourcePiece) incompleteDir() resource.DirInstance {
	i, err := s.rp.NewInstance(s.incompleteDirPath())
	if err != nil {
		panic(err)
	}
	return i.(resource.DirInstance)
}