package storage

import (
	"bytes"
	"fmt"
	"io"
	"path"
	"sort"
	"strconv"
	"sync"

	"github.com/anacrolix/missinggo/v2/resource"

	"github.com/anacrolix/torrent/metainfo"
)
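
// piecePerResource stores each piece as resources under a provider: a completed piece lives at
// "completed/<piece hash>", and incomplete data is kept as per-offset chunk instances under
// "incompleted/<piece hash>/".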
type piecePerResource struct {
	rp   PieceProvider
	opts ResourcePiecesOpts
}

type ResourcePiecesOpts struct {
	// Leave the incomplete chunk instances in place when a piece is marked complete, rather than
	// deleting them.
	LeaveIncompleteChunks bool
	// Don't use the SizedPutter interface on the completed instance, even if it implements it.
	NoSizedPuts bool
}
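
// NewResourcePieces returns a ClientImpl that stores pieces under the given provider, with
// default options. A minimal usage sketch, assuming a PieceProvider value p and torrent
// metainfo obtained elsewhere:
//
//	cl := NewResourcePieces(p)
//	t, err := cl.OpenTorrent(info, infoHash)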
func NewResourcePieces(p PieceProvider) ClientImpl {
	return NewResourcePiecesOpts(p, ResourcePiecesOpts{})
}

func NewResourcePiecesOpts(p PieceProvider, opts ResourcePiecesOpts) ClientImpl {
	return &piecePerResource{
		rp:   p,
		opts: opts,
	}
}

type piecePerResourceTorrentImpl struct {
	piecePerResource
}

func (piecePerResourceTorrentImpl) Close() error {
	return nil
}

func (s piecePerResource) OpenTorrent(info *metainfo.Info, infoHash metainfo.Hash) (TorrentImpl, error) {
	return piecePerResourceTorrentImpl{s}, nil
}

func (s piecePerResource) Piece(p metainfo.Piece) PieceImpl {
	return piecePerResourcePiece{
		mp:               p,
		piecePerResource: s,
	}
}
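
// PieceProvider is the interface required of the underlying resource provider. Optional
// capabilities (ConsecutiveChunkReader, SizedPutter) are detected at runtime with type
// assertions.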
type PieceProvider interface {
	resource.Provider
}
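
// ConsecutiveChunkReader can be implemented by a PieceProvider to stream all chunk instances
// under a directory prefix as a single reader, rather than having them read back chunk by
// chunk. Callers pass the incomplete dir path with a trailing "/".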
type ConsecutiveChunkReader interface {
	ReadConsecutiveChunks(prefix string) (io.ReadCloser, error)
}

type piecePerResourcePiece struct {
	mp metainfo.Piece
	piecePerResource
}

var _ io.WriterTo = piecePerResourcePiece{}
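
// WriteTo writes the piece's current data to w. Completed pieces are copied straight from the
// completed instance; otherwise a provider's ConsecutiveChunkReader is preferred, falling back
// to reading the incomplete chunks through ReadAt via a section reader.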
func (s piecePerResourcePiece) WriteTo(w io.Writer) (int64, error) {
	if s.mustIsComplete() {
		r, err := s.completed().Get()
		if err != nil {
			return 0, fmt.Errorf("getting complete instance: %w", err)
		}
		defer r.Close()
		return io.Copy(w, r)
	}
	if ccr, ok := s.rp.(ConsecutiveChunkReader); ok {
		return s.writeConsecutiveIncompleteChunks(ccr, w)
	}
	return io.Copy(w, io.NewSectionReader(s, 0, s.mp.Length()))
}

func (s piecePerResourcePiece) writeConsecutiveIncompleteChunks(ccr ConsecutiveChunkReader, w io.Writer) (int64, error) {
	r, err := ccr.ReadConsecutiveChunks(s.incompleteDirPath() + "/")
	if err != nil {
		return 0, err
	}
	defer r.Close()
	return io.Copy(w, r)
}

// Returns whether the piece is complete. Completion.Ok should always be true here, because this
// storage is the definitive source of truth for the piece.
func (s piecePerResourcePiece) mustIsComplete() bool {
	completion := s.Completion()
	if !completion.Ok {
		panic("must know complete definitively")
	}
	return completion.Complete
}

func (s piecePerResourcePiece) Completion() Completion {
	fi, err := s.completed().Stat()
	return Completion{
		Complete: err == nil && fi.Size() == s.mp.Length(),
		Ok:       true,
	}
}
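
// SizedPutter can be implemented by resource instances that benefit from knowing the length of
// a put up front. MarkComplete uses it unless ResourcePiecesOpts.NoSizedPuts is set.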
type SizedPutter interface {
	PutSized(io.Reader, int64) error
}
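
// MarkComplete assembles the incomplete chunks into the piece's completed instance, then
// deletes the chunks unless LeaveIncompleteChunks is set.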
func (s piecePerResourcePiece) MarkComplete() error {
	incompleteChunks := s.getChunks()
	r, err := func() (io.ReadCloser, error) {
		if ccr, ok := s.rp.(ConsecutiveChunkReader); ok {
			return ccr.ReadConsecutiveChunks(s.incompleteDirPath() + "/")
		}
		return io.NopCloser(io.NewSectionReader(incompleteChunks, 0, s.mp.Length())), nil
	}()
	if err != nil {
		return fmt.Errorf("getting incomplete chunks reader: %w", err)
	}
	defer r.Close()
	completedInstance := s.completed()
	err = func() error {
		if sp, ok := completedInstance.(SizedPutter); ok && !s.opts.NoSizedPuts {
			return sp.PutSized(r, s.mp.Length())
		} else {
			return completedInstance.Put(r)
		}
	}()
	if err == nil && !s.opts.LeaveIncompleteChunks {
		// We do this synchronously here because we don't want callers to act on the completed
		// piece while we're concurrently still deleting chunks: the caller may decide to start
		// downloading chunks again and won't expect us to delete them. It seems to be much
		// faster to let the resource provider do the deletes in parallel where possible.
		var wg sync.WaitGroup
		for _, c := range incompleteChunks {
			wg.Add(1)
			go func(c chunk) {
				defer wg.Done()
				c.instance.Delete()
			}(c)
		}
		wg.Wait()
	}
	return err
}

func (s piecePerResourcePiece) MarkNotComplete() error {
	return s.completed().Delete()
}
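
// ReadAt reads from the completed instance if the piece is complete, and otherwise from
// whatever incomplete chunks cover the requested range.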
func (s piecePerResourcePiece) ReadAt(b []byte, off int64) (int, error) {
	if s.mustIsComplete() {
		return s.completed().ReadAt(b, off)
	}
	return s.getChunks().ReadAt(b, off)
}
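
// WriteAt stores b as an incomplete chunk instance named by its decimal byte offset within the
// piece, e.g. a write at offset 0 lands at "incompleted/<piece hash>/0".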
func (s piecePerResourcePiece) WriteAt(b []byte, off int64) (n int, err error) {
	i, err := s.rp.NewInstance(path.Join(s.incompleteDirPath(), strconv.FormatInt(off, 10)))
	if err != nil {
		panic(err)
	}
	r := bytes.NewReader(b)
	err = i.Put(r)
	n = len(b) - r.Len()
	return
}
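
// chunk pairs an incomplete chunk's byte offset within the piece with the resource instance
// holding its data.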
type chunk struct {
	offset   int64
	instance resource.Instance
}

type chunks []chunk
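
// ReadAt reads b from an offset-sorted chunk list, stitching consecutive instances together: a
// short read that ends without error (or with io.EOF) continues into the following chunks at
// the advanced offset. io.EOF is returned when the requested offset isn't covered by the
// remaining chunks.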
func (me chunks) ReadAt(b []byte, off int64) (int, error) {
	for {
		if len(me) == 0 {
			return 0, io.EOF
		}
		if me[0].offset <= off {
			break
		}
		me = me[1:]
	}
	n, err := me[0].instance.ReadAt(b, off-me[0].offset)
	if n == len(b) {
		return n, nil
	}
	if err == nil || err == io.EOF {
		n_, err := me[1:].ReadAt(b[n:], off+int64(n))
		return n + n_, err
	}
	return n, err
}
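
// getChunks lists the piece's incomplete directory, parsing each name as the chunk's decimal
// byte offset, and returns the chunks sorted by offset.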
func (s piecePerResourcePiece) getChunks() (chunks chunks) {
	names, err := s.incompleteDir().Readdirnames()
	if err != nil {
		return
	}
	for _, n := range names {
		offset, err := strconv.ParseInt(n, 10, 64)
		if err != nil {
			panic(err)
		}
		i, err := s.rp.NewInstance(path.Join(s.incompleteDirPath(), n))
		if err != nil {
			panic(err)
		}
		chunks = append(chunks, chunk{offset, i})
	}
	sort.Slice(chunks, func(i, j int) bool {
		return chunks[i].offset < chunks[j].offset
	})
	return
}

func (s piecePerResourcePiece) completedInstancePath() string {
	return path.Join("completed", s.mp.Hash().HexString())
}

func (s piecePerResourcePiece) completed() resource.Instance {
	i, err := s.rp.NewInstance(s.completedInstancePath())
	if err != nil {
		panic(err)
	}
	return i
}

func (s piecePerResourcePiece) incompleteDirPath() string {
	return path.Join("incompleted", s.mp.Hash().HexString())
}

func (s piecePerResourcePiece) incompleteDir() resource.DirInstance {
	i, err := s.rp.NewInstance(s.incompleteDirPath())
	if err != nil {
		panic(err)
	}
	return i.(resource.DirInstance)
}