ms.mMaps = append(ms.mMaps, mMap)
}
+// Flush asks every mapping in the span to write its dirty pages back to
+// the backing file, collecting any errors encountered along the way.
+func (ms *MMapSpan) Flush() (errs []error) {
+ // Only the mMaps slice is read here; mutating calls elsewhere take the
+ // write lock, so a read lock is sufficient.
+ ms.mu.RLock()
+ defer ms.mu.RUnlock()
+ for i := range ms.mMaps {
+ if err := ms.mMaps[i].Flush(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ return errs
+}
+
func (ms *MMapSpan) Close() (errs []error) {
ms.mu.Lock()
defer ms.mu.Unlock()
_n := copyBytes(copyArgs(p, mMapBytes))
p = p[_n:]
n += _n
+
if segments.Int(_n) != e.Length {
panic(fmt.Sprintf("did %d bytes, expected to do %d", _n, e.Length))
}
return p.t.storage.Piece(p.Info())
}
+// Flush asks the torrent's storage implementation to persist buffered
+// data, when the implementation provides a Flush hook.
+func (p *Piece) Flush() {
+ // Flush is an optional function field on the storage implementation;
+ // skip silently when the backend does not support flushing.
+ if p.t.storage.Flush != nil {
+ // NOTE(review): the flush error is deliberately discarded because this
+ // method returns nothing — confirm callers don't need to observe it.
+ _ = p.t.storage.Flush()
+ }
+}
+
// pendingChunkIndex reports whether the chunk at chunkIndex is still
// pending, i.e. it has not been marked dirty yet.
func (p *Piece) pendingChunkIndex(chunkIndex chunkIndexType) bool {
	if p.chunkIndexDirty(chunkIndex) {
		return false
	}
	return true
}
type TorrentImpl struct {
Piece func(p metainfo.Piece) PieceImpl
Close func() error
+ Flush func() error
// Storages that share the same space, will provide equal pointers. The function is called once
// to determine the storage for torrents sharing the same function pointer, and mutated in
// place.
span: span,
pc: s.pc,
}
- return TorrentImpl{Piece: t.Piece, Close: t.Close}, err
+ return TorrentImpl{Piece: t.Piece, Close: t.Close, Flush: t.Flush}, err
}
func (s *mmapClientImpl) Close() error {
}
return nil
}
+// Flush flushes every memory mapping in the torrent's span to disk,
+// adapting the span's multi-error result to the single-error
+// TorrentImpl.Flush contract.
+// NOTE(review): when several mappings fail, only the first error is
+// surfaced and the rest are dropped — consider combining them (e.g.
+// errors.Join) if the file's Go version permits.
+func (ts *mmapTorrentStorage) Flush() error {
+ errs := ts.span.Flush()
+ if len(errs) > 0 {
+ return errs[0]
+ }
+ return nil
+}
type mmapStoragePiece struct {
pc PieceCompletionGetSetter
c._stats.incrementPiecesDirtiedGood()
}
t.clearPieceTouchers(piece)
+ hasDirty := p.hasDirtyChunks()
t.cl.unlock()
+ if hasDirty {
+ p.Flush() // You can be synchronous here!
+ }
err := p.Storage().MarkComplete()
if err != nil {
t.logger.Printf("%T: error marking piece complete %d: %s", t.storage, piece, err)